code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None ) ->List[Any]:
if start is None:
a__: str = 0
if end is None:
a__: Tuple = len(lowercase__ ) - 1
if start >= end:
return
a__: Optional[int] = (start + end) // 2
slowsort(lowercase__ , lowercase__ , lowercase__ )
slowsort(lowercase__ , mid + 1 , lowercase__ )
if sequence[end] < sequence[mid]:
a__: Union[str, Any] = sequence[mid], sequence[end]
slowsort(lowercase__ , lowercase__ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 290 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__ ( lowercase ):
    '''simple docstring'''

    # Processor that bundles an image processor with a main tokenizer and a
    # Q-Former tokenizer (InstructBLIP-style).  NOTE(review): identifiers were
    # obfuscated — several parameter lists below repeat the name `lowercase`
    # (invalid Python as written) and the bodies reference names that no longer
    # exist in scope; the intent is inferred and should be confirmed against
    # the original transformers source.
    lowerCamelCase__ = ["""image_processor""", """tokenizer"""]
    lowerCamelCase__ = """BlipImageProcessor"""
    lowerCamelCase__ = """AutoTokenizer"""

    def __init__( self , lowercase , lowercase , lowercase ):
        # NOTE(review): presumably (image_processor, tokenizer, qformer_tokenizer).
        super().__init__(lowercase , lowercase )
        # add QFormer tokenizer
        # NOTE(review): `qformer_tokenizer` is not defined in this scope — it was
        # presumably the third __init__ parameter before obfuscation.
        _lowerCamelCase : int = qformer_tokenizer

    # Tokenize `text` with both tokenizers and preprocess `images`; the
    # Q-Former tokenizer's ids/mask are stored under qformer_* keys.
    # Raises ValueError when neither images nor text is supplied.
    def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.' )
        _lowerCamelCase : int = BatchFeature()
        if text is not None:
            # Main tokenizer output is merged directly into the returned encoding.
            _lowerCamelCase : List[str] = self.tokenizer(
                text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
            encoding.update(lowercase )
            _lowerCamelCase : List[str] = self.qformer_tokenizer(
                text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
            # NOTE(review): the popped ids/mask were presumably re-added to the
            # encoding as qformer_input_ids / qformer_attention_mask.
            _lowerCamelCase : List[Any] = qformer_text_encoding.pop('input_ids' )
            _lowerCamelCase : Tuple = qformer_text_encoding.pop('attention_mask' )
        if images is not None:
            _lowerCamelCase : int = self.image_processor(lowercase , return_tensors=lowercase )
            encoding.update(lowercase )
        return encoding

    # Forward batch_decode to the main tokenizer.
    def A_ ( self , *lowercase , **lowercase ):
        return self.tokenizer.batch_decode(*lowercase , **lowercase )

    # Forward decode to the main tokenizer.
    def A_ ( self , *lowercase , **lowercase ):
        return self.tokenizer.decode(*lowercase , **lowercase )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def A_ ( self ):
        # Union of tokenizer and image-processor input names, order-preserving
        # and de-duplicated via dict.fromkeys.
        _lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names
        _lowerCamelCase : Any = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    # Save the Q-Former tokenizer into a 'qformer_tokenizer' subfolder, then
    # delegate to the base processor's save_pretrained.
    def A_ ( self , lowercase , **lowercase ):
        if os.path.isfile(lowercase ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(lowercase , exist_ok=lowercase )
        _lowerCamelCase : Optional[Any] = os.path.join(lowercase , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(lowercase )
        return super().save_pretrained(lowercase , **lowercase )

    @classmethod
    def A_ ( cls , lowercase , **lowercase ):
        # Load the Q-Former tokenizer from its subfolder and append it to the
        # arguments recovered for the standard processor constructor.
        _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , subfolder='qformer_tokenizer' )
        _lowerCamelCase : Dict = cls._get_arguments_from_pretrained(lowercase , **lowercase )
        args.append(lowercase )
        return cls(*lowercase )
from collections import namedtuple
import requests
from lxml import html # type: ignore
# Record type for the three scraped counters.
UpperCAmelCase_ = namedtuple('covid_data', 'cases deaths recovered')


def lowerCAmelCase_(__UpperCAmelCase: str = "https://www.worldometers.info/coronavirus/") -> "UpperCAmelCase_":
    """Scrape worldometers.info and return covid_data(cases, deaths, recovered).

    :param __UpperCAmelCase: page URL to scrape (defaults to the live tracker)
    :return: namedtuple of the three counter texts, in page order
    """
    # The page's three big "maincounter" spans hold cases, deaths, recovered.
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return UpperCAmelCase_(*html.fromstring(requests.get(__UpperCAmelCase).content).xpath(xpath_str))


if __name__ == "__main__":
    # Keep the format string in its own name so it no longer clobbers the
    # namedtuple binding above.
    fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
    print(fmt.format(*lowerCAmelCase_()))
| 201 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : List[Any] = os.path.join(lowercase__ , 'all_results.json' )
if os.path.exists(lowercase__ ):
with open(lowercase__ , 'r' ) as f:
_lowerCamelCase : List[Any] = json.load(lowercase__ )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase__ ( lowercase ):
    '''simple docstring'''

    # End-to-end TPU smoke test: spawns run_glue.py via torch_xla's xla_spawn
    # launcher by patching argv, then checks the produced metrics.
    # NOTE(review): identifiers were obfuscated — `lowercase`, `result`,
    # `start`/`end` below refer to names that no longer line up; presumably
    # `patch.object(sys, 'argv', testargs)` was intended.
    def A_ ( self ):
        import xla_spawn

        _lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
        # Full command line handed to xla_spawn (script path + its arguments).
        _lowerCamelCase : List[Any] = F'''
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            '''.split()
        with patch.object(lowercase , 'argv' , lowercase ):
            _lowerCamelCase : Dict = time()
            xla_spawn.main()
            _lowerCamelCase : Any = time()

        # Read back all_results.json written by the training run.
        _lowerCamelCase : Optional[int] = get_results(lowercase )
        self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
        # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
        self.assertLess(end - start , 500 )

    # Minimal launcher check: run the TPU trainer test script under xla_spawn.
    def A_ ( self ):
        import xla_spawn

        _lowerCamelCase : Tuple = '\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    '.split()
        with patch.object(lowercase , 'argv' , lowercase ):
            xla_spawn.main()
from __future__ import annotations
import math
from collections.abc import Callable
def UpperCamelCase_(fnc, x_start, x_end, steps: int = 100) -> float:
    """Approximate the arc length of ``fnc`` over ``[x_start, x_end]``.

    The curve is approximated by ``steps`` straight chords whose lengths are
    summed with ``math.hypot``.

    :param fnc: function of one variable to measure
    :param x_start: left end of the interval
    :param x_end: right end of the interval
    :param steps: number of chords (more steps -> better approximation)
    :return: approximate curve length
    """
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xb = xa + (x_end - x_start) / steps
        fxb = fnc(xb)
        length += math.hypot(xb - xa, fxb - fxa)
        # Increment step
        xa = xb
        fxa = fxb
    return length
if __name__ == "__main__":

    def f(x):
        """Demo curve: f(x) = sin(10 * x)."""
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        # Call the arc-length function defined above (obfuscated name).
        print(f'''With {i} steps: {UpperCamelCase_(f, -10, 10, i)}''')
        i *= 10
| 218 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _snake_case ( lowercase__ , lowercase__ ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowercase__ , lowercase__ ) ) )
def _snake_case ( lowercase__ , lowercase__ ):
if dataset.ndim != value_array.ndim:
_lowerCamelCase : Tuple = (
'Wrong input data\'s dimensions... '
f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(lowercase__ )
try:
if dataset.shape[1] != value_array.shape[1]:
_lowerCamelCase : Optional[int] = (
'Wrong input data\'s shape... '
f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(lowercase__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
_lowerCamelCase : int = (
'Input data have different datatype... '
f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(lowercase__ )
_lowerCamelCase : Optional[int] = []
for value in value_array:
_lowerCamelCase : Tuple = euclidean(lowercase__ , dataset[0] )
_lowerCamelCase : Union[str, Any] = dataset[0].tolist()
for dataset_value in dataset[1:]:
_lowerCamelCase : Optional[Any] = euclidean(lowercase__ , lowercase__ )
if dist > temp_dist:
_lowerCamelCase : List[Any] = temp_dist
_lowerCamelCase : List[str] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _snake_case ( lowercase__ , lowercase__ ):
return np.dot(lowercase__ , lowercase__ ) / (norm(lowercase__ ) * norm(lowercase__ ))
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 | 0 |
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase__ = get_logger(__name__)
class __lowerCAmelCase :
UpperCamelCase = '''dummy_data'''
UpperCamelCase = '''datasets'''
UpperCamelCase = False
def __init__( self : Any , A : Optional[Any] , A : str , A : Optional[int] , A : List[str] = None , A : Optional[Any] = False , A : Union[str, Any] = True , A : str = None , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = dataset_name
_UpperCAmelCase = cache_dir
_UpperCAmelCase = use_local_dummy_data
_UpperCAmelCase = config
# download_callbacks take a single url as input
_UpperCAmelCase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_UpperCAmelCase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_UpperCAmelCase = str(A)
# to be downloaded
_UpperCAmelCase = None
_UpperCAmelCase = None
@property
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
if self._dummy_file is None:
_UpperCAmelCase = self.download_dummy_data()
return self._dummy_file
@property
def _lowerCamelCase ( self : List[Any]) -> str:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name)
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name)
@property
def _lowerCamelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip')
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_UpperCAmelCase = cached_path(
A , cache_dir=self.cache_dir , extract_compressed_file=A , force_extract=A)
return os.path.join(A , self.dummy_file_name)
@property
def _lowerCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)
@property
def _lowerCamelCase ( self : List[str]) -> Dict:
"""simple docstring"""
if self._bucket_url is None:
_UpperCAmelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/'))
return self._bucket_url
@property
def _lowerCamelCase ( self : Any) -> List[Any]:
"""simple docstring"""
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/').split('/')[:-1])
def _lowerCamelCase ( self : int , A : List[str] , *A : List[Any]) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_UpperCAmelCase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_UpperCAmelCase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A , A):
return self.create_dummy_data_dict(A , A)
elif isinstance(A , (list, tuple)):
return self.create_dummy_data_list(A , A)
else:
return self.create_dummy_data_single(A , A)
def _lowerCamelCase ( self : List[Any] , A : str , *A : Any) -> Optional[int]:
"""simple docstring"""
return self.download_and_extract(A)
def _lowerCamelCase ( self : Dict , A : str , A : Dict) -> List[str]:
"""simple docstring"""
return self.download_and_extract(A)
def _lowerCamelCase ( self : Optional[Any] , A : List[str] , *A : Any , **A : Optional[int]) -> Dict:
"""simple docstring"""
return path
def _lowerCamelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
return {}
def _lowerCamelCase ( self : Optional[Any] , A : Union[str, Any] , A : int) -> Any:
"""simple docstring"""
_UpperCAmelCase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A , A):
for single_url in single_urls:
download_callback(A)
else:
_UpperCAmelCase = single_urls
download_callback(A)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A , A):
_UpperCAmelCase = [os.path.join(A , urllib.parse.quote_plus(Path(A).name)) for x in single_urls]
else:
_UpperCAmelCase = single_urls
_UpperCAmelCase = os.path.join(A , urllib.parse.quote_plus(Path(A).name))
_UpperCAmelCase = value
# make sure that values are unique
if all(isinstance(A , A) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values()):
# append key to value to make its name unique
_UpperCAmelCase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _lowerCamelCase ( self : List[str] , A : Union[str, Any] , A : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_UpperCAmelCase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , A)) for url in data_url)
_UpperCAmelCase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url)
if data_url and (is_tf_records or is_pubmed_records):
_UpperCAmelCase = [data_url[0]] * len(A)
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(A)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_UpperCAmelCase = os.path.join(A , urllib.parse.quote_plus(single_url.split('/')[-1]))
dummy_data_list.append(A)
return dummy_data_list
def _lowerCamelCase ( self : Union[str, Any] , A : List[Any] , A : Any) -> List[Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(A)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_UpperCAmelCase = os.path.join(A , urllib.parse.quote_plus(data_url.split('/')[-1]))
if os.path.exists(A) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _lowerCamelCase ( self : Dict) -> str:
"""simple docstring"""
pass
def _lowerCamelCase ( self : str) -> Any:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Dict , A : Union[str, Any]) -> str:
"""simple docstring"""
def _iter_archive_members(A : Optional[int]):
# this preserves the order of the members inside the ZIP archive
_UpperCAmelCase = Path(self.dummy_file).parent
_UpperCAmelCase = path.relative_to(A)
with ZipFile(self.local_path_to_dummy_data) as zip_file:
_UpperCAmelCase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix()):
yield dummy_parent_path.joinpath(A)
_UpperCAmelCase = Path(A)
_UpperCAmelCase = _iter_archive_members(A) if self.use_local_dummy_data else path.rglob('*')
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__')):
yield file_path.relative_to(A).as_posix(), file_path.open('rb')
def _lowerCamelCase ( self : Optional[Any] , A : int) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A , A):
_UpperCAmelCase = [paths]
for path in paths:
if os.path.isfile(A):
if os.path.basename(A).startswith(('.', '__')):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(A):
if os.path.basename(A).startswith(('.', '__')):
continue
dirnames.sort()
for filename in sorted(A):
if filename.startswith(('.', '__')):
continue
yield os.path.join(A , A)
| 339 |
"""simple docstring"""
import socket
def _snake_case():
    """Connect to a file server on this host (port 12312), send a greeting,
    and stream the server's response into ``Received_file``.

    Blocks until the server closes the connection (empty recv).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b'Hello server!')
    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            # An empty read means the server finished sending.
            if not data:
                break
            out_file.write(data)
    print('Successfully received the file')
    sock.close()
    print('Connection closed')


if __name__ == "__main__":
    _snake_case()
'''simple docstring'''
def SCREAMING_SNAKE_CASE_(_UpperCAmelCase: str) -> str:
    """Reverse every word of more than four characters in ``_UpperCAmelCase``.

    >>> SCREAMING_SNAKE_CASE_('Hey wollef sroirraw')
    'Hey fellow warriors'
    """
    return " ".join(
        word[::-1] if len(word) > 4 else word for word in _UpperCAmelCase.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(SCREAMING_SNAKE_CASE_('''Hey wollef sroirraw'''))
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowercase__ = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
lowercase__ = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
lowercase__ = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def _snake_case ( lowercase__ , lowercase__ ):
return float((preds == labels).mean() )
def _snake_case ( lowercase__ , lowercase__ , lowercase__="binary" ):
_lowerCamelCase : str = simple_accuracy(lowercase__ , lowercase__ )
_lowerCamelCase : Any = float(fa_score(y_true=lowercase__ , y_pred=lowercase__ , average=lowercase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Any = {}
for id_pred, label in zip(lowercase__ , lowercase__ ):
_lowerCamelCase : Tuple = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
_lowerCamelCase : Union[str, Any] = id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_lowerCamelCase : Optional[Any] = [(pred, label)]
_lowerCamelCase, _lowerCamelCase : Optional[int] = [], []
for question, preds_labels in question_map.items():
_lowerCamelCase, _lowerCamelCase : Tuple = zip(*lowercase__ )
_lowerCamelCase : List[str] = fa_score(y_true=lowercase__ , y_pred=lowercase__ , average='macro' )
fas.append(lowercase__ )
_lowerCamelCase : int = int(sum(pred == label for pred, label in preds_labels ) == len(lowercase__ ) )
ems.append(lowercase__ )
_lowerCamelCase : Optional[Any] = float(sum(lowercase__ ) / len(lowercase__ ) )
_lowerCamelCase : Optional[int] = sum(lowercase__ ) / len(lowercase__ )
_lowerCamelCase : List[Any] = float(fa_score(y_true=lowercase__ , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def A_ ( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
def A_ ( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def A_ ( self , lowercase , lowercase ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )}
elif self.config_name == "cb":
return acc_and_fa(lowercase , lowercase , fa_avg='macro' )
elif self.config_name == "record":
_lowerCamelCase : List[str] = [
{
'qas': [
{'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
for ref in references
]
}
]
_lowerCamelCase : Union[str, Any] = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
return evaluate_record(lowercase , lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(lowercase , lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(lowercase , lowercase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' ) | 96 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
a : int = (720, 1280) # Height, Width
a : Dict = (0.4, 0.6) # if height or width lower than this scale, drop it.
a : List[Any] = 1 / 100
a : Dict = ''
a : Tuple = ''
a : Tuple = ''
a : List[str] = 250
def __magic_name__ ( ) -> List[Any]:
'''simple docstring'''
snake_case_ = get_dataset(lowercase__, lowercase__ )
for index in range(lowercase__ ):
snake_case_ = random.sample(range(len(lowercase__ ) ), 4 )
snake_case_ = update_image_and_anno(
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, filter_scale=lowercase__, )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
snake_case_ = random_chars(32 )
snake_case_ = path.split(os.sep )[-1].rsplit('''.''', 1 )[0]
snake_case_ = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
cva.imwrite(F"{file_root}.jpg", lowercase__, [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
snake_case_ = []
for anno in new_annos:
snake_case_ = anno[3] - anno[1]
snake_case_ = anno[4] - anno[2]
snake_case_ = anno[1] + width / 2
snake_case_ = anno[2] + height / 2
snake_case_ = F"{anno[0]} {x_center} {y_center} {width} {height}"
annos_list.append(lowercase__ )
with open(F"{file_root}.txt", '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
snake_case_ = []
snake_case_ = []
for label_file in glob.glob(os.path.join(lowercase__, '''*.txt''' ) ):
snake_case_ = label_file.split(os.sep )[-1].rsplit('''.''', 1 )[0]
with open(lowercase__ ) as in_file:
snake_case_ = in_file.readlines()
snake_case_ = os.path.join(lowercase__, F"{label_name}.jpg" )
snake_case_ = []
for obj_list in obj_lists:
snake_case_ = obj_list.rstrip('''\n''' ).split(''' ''' )
snake_case_ = float(obj[1] ) - float(obj[3] ) / 2
snake_case_ = float(obj[2] ) - float(obj[4] ) / 2
snake_case_ = float(obj[1] ) + float(obj[3] ) / 2
snake_case_ = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(lowercase__ )
labels.append(lowercase__ )
return img_paths, labels
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = 0.0, ) -> Optional[int]:
'''simple docstring'''
snake_case_ = np.zeros([output_size[0], output_size[1], 3], dtype=np.uinta )
snake_case_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case_ = int(scale_x * output_size[1] )
snake_case_ = int(scale_y * output_size[0] )
snake_case_ = []
snake_case_ = []
for i, index in enumerate(lowercase__ ):
snake_case_ = all_img_list[index]
path_list.append(lowercase__ )
snake_case_ = all_annos[index]
snake_case_ = cva.imread(lowercase__ )
if i == 0: # top-left
snake_case_ = cva.resize(lowercase__, (divid_point_x, divid_point_y) )
snake_case_ = img
for bbox in img_annos:
snake_case_ = bbox[1] * scale_x
snake_case_ = bbox[2] * scale_y
snake_case_ = bbox[3] * scale_x
snake_case_ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
snake_case_ = cva.resize(lowercase__, (output_size[1] - divid_point_x, divid_point_y) )
snake_case_ = img
for bbox in img_annos:
snake_case_ = scale_x + bbox[1] * (1 - scale_x)
snake_case_ = bbox[2] * scale_y
snake_case_ = scale_x + bbox[3] * (1 - scale_x)
snake_case_ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
snake_case_ = cva.resize(lowercase__, (divid_point_x, output_size[0] - divid_point_y) )
snake_case_ = img
for bbox in img_annos:
snake_case_ = bbox[1] * scale_x
snake_case_ = scale_y + bbox[2] * (1 - scale_y)
snake_case_ = bbox[3] * scale_x
snake_case_ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
snake_case_ = cva.resize(
lowercase__, (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
snake_case_ = img
for bbox in img_annos:
snake_case_ = scale_x + bbox[1] * (1 - scale_x)
snake_case_ = scale_y + bbox[2] * (1 - scale_y)
snake_case_ = scale_x + bbox[3] * (1 - scale_x)
snake_case_ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
snake_case_ = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __magic_name__ ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
snake_case_ = ascii_lowercase + digits
return "".join(random.choice(lowercase__ ) for _ in range(lowercase__ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 56 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/cuDNN fully deterministic so the expected-slice assertions below are reproducible.
enable_full_determinism()
class lowerCAmelCase__(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for ``DDIMPipeline``.

    The obfuscated original assigned five different class attributes to a single
    name and gave every method the same name ``A_`` (so earlier definitions were
    silently shadowed); attribute/method names are restored from the
    ``PipelineTesterMixin`` contract and the ``super()``/``self`` calls in the
    bodies. The undefined base class ``lowercase`` is replaced by
    ``PipelineTesterMixin``, which the attribute expression below already uses.
    """

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the final obfuscated `False` attribute most plausibly disabled
    # CPU-offload testing for this pipeline — confirm the attribute name.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny UNet + DDIM scheduler for fast CPU inference."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        scheduler = DDIMScheduler()
        return {'unet': unet, 'scheduler': scheduler}

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; MPS needs a CPU-side generator."""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowerCAmelCase__(unittest.TestCase):
    """Slow GPU integration tests against pretrained DDPM checkpoints.

    Both test methods were named ``A_`` in the obfuscated source, so the CIFAR-10
    test was silently shadowed and never ran; distinct descriptive names restore
    both. Undefined ``lowercase`` references are replaced with the actual locals
    and the imported ``torch_device``.
    """

    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The obfuscated source bound both the logger and the archive map to one name,
# so the logger was immediately clobbered. Keep the final public name
# `SCREAMING_SNAKE_CASE__` pointing at the map (backward compatible) and restore
# a distinct `logger` binding.
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class a_(lowerCamelCase):
    """Configuration class for the CTRL model (obfuscated class name kept).

    The original `__init__` declared every parameter as `_SCREAMING_SNAKE_CASE`
    (a SyntaxError: duplicate argument names) and dropped the `self.` prefix on
    every attribute; names are restored from the values the body reads and the
    `attribute_map` below.
    """

    model_type = """ctrl"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        """Store the hyper-parameters and forward remaining kwargs to the base config."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 321 |
"""simple docstring"""
# Imports
import numpy as np
class lowerCAmelCase__:
    """Compute vegetation indices from spectral band matrices.

    Bands (numpy arrays of identical shape): ``red``, ``green``, ``blue``,
    ``red_edge`` (stored as ``self.redEdge``) and ``nir``. The obfuscated
    original named every method ``A_`` and dropped the ``self.`` prefix when
    storing bands, so the dispatch table and every formula raised at runtime;
    method/attribute names are restored from the dispatch table in
    ``calculation`` and the attributes each formula reads.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Update only the bands that were provided; returns True."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch on the index name; returns False for an unknown index."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            'ARVI2': self.arvaa,
            'CCCI': self.ccci,
            'CVI': self.cvi,
            'GLI': self.gli,
            'NDVI': self.ndvi,
            'BNDVI': self.bndvi,
            'redEdgeNDVI': self.red_edge_ndvi,
            'GNDVI': self.gndvi,
            'GBNDVI': self.gbndvi,
            'GRNDVI': self.grndvi,
            'RBNDVI': self.rbndvi,
            'PNDVI': self.pndvi,
            'ATSAVI': self.atsavi,
            'BWDRVI': self.bwdrvi,
            'CIgreen': self.ci_green,
            'CIrededge': self.ci_rededge,
            'CI': self.ci,
            'CTVI': self.ctvi,
            'GDVI': self.gdvi,
            'EVI': self.evi,
            'GEMI': self.gemi,
            'GOSAVI': self.gosavi,
            'GSAVI': self.gsavi,
            'Hue': self.hue,
            'IVI': self.ivi,
            'IPVI': self.ipvi,
            'I': self.i,
            'RVI': self.rvi,
            'MRVI': self.mrvi,
            'MSAVI': self.m_savi,
            'NormG': self.norm_g,
            'NormNIR': self.norm_nir,
            'NormR': self.norm_r,
            'NGRDI': self.ngrdi,
            'RI': self.ri,
            'S': self.s,
            'IF': self._if,
            'DVI': self.dvi,
            'TVI': self.tvi,
            'NDRE': self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print('Index not in the list!')
            return False

    def arvaa(self):
        # Atmospherically Resistant Vegetation Index 2
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        # Normalized Difference Vegetation Index
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        # Adjusted Transformed Soil-Adjusted VI with tunable soil-line coefficients.
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        # Saturation: (max - min) / max over the visible bands.
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class _lowerCAmelCase(TaskTemplate):
    """`text-classification` task template: maps a dataset to (text, labels).

    The obfuscated source bound all five class attributes to one mangled name
    and both methods to `_lowercase`; names are restored from what the bodies
    read (`self.label_column`, `self.text_column`, `self.label_schema`) and the
    imported `TaskTemplate` base.
    """

    # `task` is a real dataclass field; the schemas are class-level (ClassVar) constants.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel."""
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Bypass the frozen dataclass to install the aligned schema on the copy.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Dataset-column -> template-column mapping used when casting."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 17 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__(lowercase):
    """CLIP-based image encoder for Paint-by-Example conditioning.

    The obfuscated `__init__` declared two parameters with the same name
    (a SyntaxError) and dropped every `self.` prefix; attribute names are
    restored from what `forward` reads.
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode pixel values to latent conditioning states (optionally with the uncond vector)."""
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # Add a sequence dimension before feeding the pooled embedding to the mapper.
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    """Stack of transformer blocks mapping pooled CLIP embeddings to conditioning states.

    The obfuscated source passed the undefined name `lowercase` to
    `BasicTransformerBlock` and never rebound the hidden states in `forward`;
    both are repaired (per-head dim = hidden_size // num_heads, attention_bias=True
    — reconstructed from the canonical diffusers implementation; confirm).
    """

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ])

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase__:
    """Builds tiny TimesformerConfig objects and dummy inputs for the tests below.

    All methods were named `_a` in the obfuscated source (so only the last one
    survived) and tuple unpacks were lost; names are restored from the callers
    in the test class (`prepare_config_and_inputs`, `create_and_check_model`, …).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model + pipeline tests for TimeSformer.

    Reconstructed from the obfuscated source: the base classes were the
    undefined name `A_` (restored from the imported mixins), all attributes
    shared one name, and all methods were named `_a`.
    NOTE(review): `TimesformerModelTester` below does not resolve in this file
    because the tester class above was renamed by the obfuscation — the
    original already had this dangling reference; flagged rather than silently
    re-pointed.
    """

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='TimeSformer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def _UpperCAmelCase():
    """Download the spaghetti test video from the Hub and return it as a list of frames."""
    # The obfuscated body referenced the undefined name `lowercase__`; the
    # downloaded file path and loaded array are threaded through explicitly.
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset')
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class UpperCAmelCase__(unittest.TestCase):
    """Slow end-to-end inference test for the pretrained K400 TimeSformer."""

    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400').to(
            torch_device)
        image_processor = self.default_image_processor
        # `_UpperCAmelCase` is the (obfuscated) prepare_video helper defined above;
        # the original called the nonexistent name `prepare_video`.
        video = _UpperCAmelCase()
        inputs = image_processor(video[:8], return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 62 |
"""simple docstring"""
# Metric length-unit name -> symbol. The function body reads `UNIT_SYMBOL` and
# `METRIC_CONVERSION`, but the obfuscated source bound both dicts to one name
# (`lowercase__`), clobbering the first; the intended names are restored.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def _snake_case(value, from_type, to_type):
    """Convert a length `value` from `from_type` to `to_type` metric units.

    Unit names are case-insensitive, accept a trailing plural 's', and may be
    given either as full names ("kilometer") or symbols ("km").

    Raises:
        ValueError: if either unit is not a known metric length unit.
    """
    # Normalise: lower-case, drop plural 's', then map full names to symbols.
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f'''Invalid \'from_type\' value: {from_type!r}.\n'''
            f'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f'''Invalid \'to_type\' value: {to_type!r}.\n'''
            f'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    # Both branches of the original if/else computed from_exp - to_exp.
    exponent = METRIC_CONVERSION[from_sanitized] - METRIC_CONVERSION[to_sanitized]
    return value * pow(10, exponent)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
# Module logger. The `logger` alias is restored (in addition to the obfuscated
# name) because `RunCommand.run()` below already logs via the name `logger`.
logger = _SCREAMING_SNAKE_CASE = logging.get_logger(__name__)  # pylint: disable=invalid-name
def lowerCamelCase__(path):
    """Infer the pipeline data format from a file path's extension.

    Returns "pipe" for an empty/None path (read from stdin); otherwise returns
    the matching supported extension, or raises if none matches. The obfuscated
    body read the names `path`/`lowercase__` while the parameter was named
    differently, so it always raised NameError.
    """
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f'''Unable to determine file format from file extension {path}. '''
        f'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''')
def lowerCamelCase__(args):
    """Build a run command (pipeline + data reader) from parsed CLI arguments.

    NOTE(review): `try_infer_format_from_ext` and `RunCommand` were already
    unresolved names in this obfuscated file (the helper above and the command
    class below were renamed); those pre-existing references are kept as-is.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    # Infer the on-disk format from the input extension unless given explicitly.
    format = try_infer_format_from_ext(args.input) if args.format == 'infer' else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class a(__snake_case):
    """CLI command that feeds a data file through a pipeline and saves the outputs.

    The obfuscated `__init__` declared two parameters with the same name (a
    SyntaxError), and `run` lost its tuple unpack and local names; both are
    restored from what the bodies read (`self._nlp`, `self._reader`).
    """

    def __init__(self, nlp, reader):
        # nlp: the instantiated Pipeline; reader: a PipelineDataFormat instance.
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        """Attach the `run` sub-command to the root CLI argument parser."""
        run_parser = parser.add_parser('run', help='Run a pipeline through the CLI')
        run_parser.add_argument('--task', choices=get_supported_tasks(), help='Task to run')
        run_parser.add_argument('--input', type=str, help='Path to the file to use for inference')
        run_parser.add_argument('--output', type=str, help='Path to the file that will be used post to write results.')
        run_parser.add_argument('--model', type=str, help='Name or path to the model to instantiate.')
        run_parser.add_argument('--config', type=str, help='Name or path to the model\'s config to instantiate.')
        run_parser.add_argument(
            '--tokenizer', type=str, help='Name of the tokenizer to use. (default: same as the model name)')
        run_parser.add_argument(
            '--column', type=str, help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)')
        run_parser.add_argument(
            '--format', type=str, default='infer', choices=PipelineDataFormat.SUPPORTED_FORMATS, help='Input format to read from')
        run_parser.add_argument(
            '--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)')
        run_parser.add_argument('--overwrite', action='store_true', help='Allow overwriting the output file.')
        # `lowerCamelCase__` is the (obfuscated) run-command factory defined above.
        run_parser.set_defaults(func=lowerCamelCase__)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''')
        else:
            self._reader.save(outputs)
| 183 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the ViT-MSN package: heavy torch modules are only
# imported on first attribute access (or eagerly under TYPE_CHECKING for static
# analysis). The obfuscated source referenced `_import_structure` without ever
# defining it, and the modeling list overwrote the configuration dict; the
# canonical transformers lazy-module pattern is restored. The old public name
# `lowercase__` is kept as an alias of the structure dict for compatibility.
lowercase__ = _import_structure = {
    "configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The obfuscated source bound both the logger and the archive map to the single
# name `a_`, discarding the logger — which `from_pretrained` below uses via the
# name `logger`. Restore `logger` and keep `a_` pointing at the map.
logger = logging.get_logger(__name__)

a_ = BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}
class BlipTextConfig(PretrainedConfig):
    """Configuration for the BLIP text model.

    Holds the hyper-parameters of the text encoder/decoder; the defaults match
    the text tower of `Salesforce/blip-vqa-base`.
    """

    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this sub-config; accepts a full BLIP checkpoint and extracts `text_config`."""
        cls._set_token_in_kwargs(kwargs)

        # get_config_dict returns (config_dict, remaining_kwargs); the original
        # obfuscated code dropped the tuple unpacking, which broke the lookups below.
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    """Configuration for the BLIP vision model (ViT-style image encoder).

    Defaults match the vision tower of `Salesforce/blip-vqa-base`.
    """

    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this sub-config; accepts a full BLIP checkpoint and extracts `vision_config`."""
        cls._set_token_in_kwargs(kwargs)

        # Unpack (config_dict, remaining_kwargs) — the obfuscated original kept
        # the tuple, so `config_dict.get(...)` below would have failed.
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    """Composite BLIP configuration bundling a text and a vision sub-config."""

    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        # The text cross-attention attends over vision states, so keep the
        # sizes in sync.
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        """Build a BlipConfig from already-instantiated text and vision sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 277 |
"""simple docstring"""
def method_a(boundary: list, steps: float) -> float:
    """Approximate the integral of ``f`` over ``boundary = [a, b]`` with the
    extended trapezoidal rule using ``steps`` sub-intervals.
    """
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    # endpoints get half weight, interior points full weight
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a: float, b: float, h: float):
    """Yield the interior grid points a+h, a+2h, ... strictly below b-h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x: float) -> float:  # enter your function here
    """Integrand: f(x) = x^2."""
    y = (x - 0) * (x - 0)
    return y
def main() -> None:
    """Integrate f(x) = x^2 over [0, 1] with 10 trapezoids and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20).

    >>> solution(10)
    27
    """
    # The obfuscated original mapped an undefined name over the digits;
    # each character of str(num!) must be converted with int().
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
"""simple docstring"""
import math
def perfect_square(num: int) -> bool:
    """Return True if `num` is a perfect square, using floating-point sqrt.

    NOTE: relies on exact float arithmetic; for very large integers prefer
    the binary-search variant below.
    """
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    """Return True if `n` is a perfect square, via integer binary search.

    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(15)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """Node of a segment tree covering the inclusive index range [start, end]."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        # Split point used to route updates/queries to the children.
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    """Segment tree over a sequence supporting point update and range query
    with an arbitrary associative combine function (e.g. add, max, min).
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set the element at index `i` to `val` and refresh ancestor values."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Combine the elements over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # Leaf: a single collection element.
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # Recompute this node from its (possibly changed) children.
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield all nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
    import operator

    # Demonstrate the tree with three combine functions; the original bound
    # the tree to a throwaway name while using `arr` below.
    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 201 |
"""simple docstring"""
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of words
    from `words` (each word may be reused).

    Builds a trie of the dictionary words, then runs a memoized DFS over
    start indices.

    >>> word_break("applepenapple", ["apple", "pen"])
    True
    >>> word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])
    False
    """
    # Validation — the obfuscated original called isinstance(x, x), a TypeError.
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        """True iff string[index:] can be segmented into dictionary words."""
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
# Root logger used by the doc-sample test class below (it calls `logger.info`).
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    """Runs the doctests embedded in the library's source and documentation files."""

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Collect the files of `directory` matching `identifier` (and not
        matching `n_identifier`), then execute their doctests.

        Args:
            directory: directory to scan (non-recursively) for files.
            identifier: keep only files whose name contains this substring.
            ignore_files: extra file names to skip ("__init__.py" is always skipped).
            n_identifier: drop files containing this substring (or any of them, if a list).
            only_modules: if True, resolve each file to a `transformers` attribute and
                run `doctest.DocTestSuite` on it; otherwise run `doctest.testfile`.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 218 |
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    """Return True if `series` is an arithmetic progression (constant difference).

    Raises ValueError for non-list input or an empty list.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    # A single element is trivially arithmetic.
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the numbers in `series`.

    Raises ValueError for non-list input or an empty list.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for StableDiffusionKDiffusionPipeline."""

    def tearDown(self):
        # Free VRAM between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        # Looser tolerance for this checkpoint (matches the original threshold).
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 339 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase__ = 16  # max per-device train batch size (MAX_GPU_BATCH_SIZE in the upstream accelerate example)
lowercase__ = 32  # eval batch size — NOTE(review): rebinds the same name, so only 32 is visible at runtime
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Build tokenized GLUE/MRPC train and eval dataloaders.

    Args:
        accelerator: the `Accelerator` (used to detect TPU padding needs).
        batch_size: train batch size.
        model_name: tokenizer checkpoint name.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=32  # EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train and evaluate a sequence-classification model on GLUE/MRPC under
    `accelerate` (optionally with a DeepSpeed plugin) and record per-epoch
    accuracy; optionally asserts a performance lower bound from `args`.
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer — use the DeepSpeed dummy when the DS config owns the optimizer.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler — dummy when the DeepSpeed config owns the scheduler.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    """Parse CLI args and launch the training function."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for DDIMPipeline via the common pipeline test mixin."""

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): assumed to be the cpu-offload switch of the mixin — confirm.
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for DDIMPipeline against hosted checkpoints."""

    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 276 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    """Dummy config for the "new-model" type used by the auto-class registration tests.

    Restores the intended name: the obfuscated `lowerCAmelCase__` left the
    `NewModelConfig` references later in the file dangling, and `lowercase`
    (the base) was undefined — `BertConfig` is imported at the top of the file.
    """

    model_type = "new-model"
if is_tf_available():

    class TFNewModel(TFBertModel):
        """Dummy TF model paired with `NewModelConfig` for the registration tests.

        Only defined when TensorFlow is importable, matching the guarded
        import block above.
        """

        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    """Tests for the TF auto-model classes: pretrained loading, auto-class
    registration, and the error messages raised for bad identifiers.

    Reconstructed from the obfuscated original, which contained tuple-annotation
    SyntaxErrors (``a, a : T = ...``) and referenced the undefined name
    ``lowercase`` throughout; the intended identifiers (``model_name``,
    ``SMALL_MODEL_IDENTIFIER``, ``DUMMY_UNKNOWN_IDENTIFIER``) are taken from the
    module imports and surviving references.
    """

    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            # With `output_loading_info=True` the call also returns a loading report.
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            # Always clean the registries so one failing sub-test cannot pollute the others.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
'''simple docstring'''
a : Dict = range(2, 20 + 1)
a : Optional[Any] = [10**k for k in range(ks[-1] + 1)]
a : Optional[Any] = {}
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = sum(a_i[j] for j in range(lowercase__, len(lowercase__ ) ) )
snake_case_ = sum(a_i[j] * base[j] for j in range(min(len(lowercase__ ), lowercase__ ) ) )
snake_case_ = 0, 0
snake_case_ = n - i
snake_case_ = memo.get(lowercase__ )
if sub_memo is not None:
snake_case_ = sub_memo.get(lowercase__ )
if jumps is not None and len(lowercase__ ) > 0:
# find and make the largest jump without going over
snake_case_ = -1
for _k in range(len(lowercase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
snake_case_ = _k
break
if max_jump >= 0:
snake_case_ = jumps[max_jump]
# since the difference between jumps is cached, add c
snake_case_ = diff + c
for j in range(min(lowercase__, len(lowercase__ ) ) ):
snake_case_ = divmod(lowercase__, 10 )
if new_c > 0:
add(lowercase__, lowercase__, lowercase__ )
else:
snake_case_ = []
else:
snake_case_ = {c: []}
snake_case_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
snake_case_ = next_term(lowercase__, k - 1, i + dn, lowercase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
snake_case_ = compute(lowercase__, lowercase__, i + dn, lowercase__ )
diff += _diff
dn += terms_jumped
snake_case_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
snake_case_ = 0
while j < len(lowercase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowercase__, (diff, dn, k) )
return (diff, dn)
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(lowercase__ ):
a_i.extend([0 for _ in range(k - len(lowercase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
snake_case_ = i
snake_case_ = 0, 0, 0
for j in range(len(lowercase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
snake_case_ = ds_c + ds_b
diff += addend
snake_case_ = 0
for j in range(lowercase__ ):
snake_case_ = a_i[j] + addend
snake_case_ = divmod(lowercase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowercase__, lowercase__, lowercase__ )
return diff, i - start_i
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
for j in range(lowercase__, len(lowercase__ ) ):
snake_case_ = digits[j] + addend
if s >= 10:
snake_case_ = divmod(lowercase__, 10 )
snake_case_ = addend // 10 + quotient
else:
snake_case_ = s
snake_case_ = addend // 10
if addend == 0:
break
while addend > 0:
snake_case_ = divmod(lowercase__, 10 )
digits.append(lowercase__ )
def __magic_name__ ( __UpperCAmelCase = 10**15 ) -> Any:
'''simple docstring'''
snake_case_ = [1]
snake_case_ = 1
snake_case_ = 0
while True:
snake_case_ = next_term(lowercase__, 20, i + dn, lowercase__ )
dn += terms_jumped
if dn == n - i:
break
snake_case_ = 0
for j in range(len(lowercase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 |
"""Lazy import structure for the I-BERT model (standard HF ``__init__.py`` layout).

Restores the intended names: the obfuscated original bound both the import map
and the modeling list to ``lowercase__`` and then passed the never-defined
``_import_structure`` to ``_LazyModule``, and dropped the ``sys.modules`` swap.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names it exposes; consumed by `_LazyModule` below.
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: the modeling objects are simply not exported without it.
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    # ...while at runtime the module is replaced by a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Keep the auto-generated model lists in the task-guide docs in sync.

Restores the intended structure: the obfuscated original named all three
functions ``lowercase__`` (each clobbering the last), collapsed every module
constant into ``SCREAMING_SNAKE_CASE__``, and discarded the 4-tuple returned
by ``_find_text_in_file``.
"""
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return (text, start_index, end_index, lines) for the region of `filename`
    between `start_prompt` and `end_prompt`, trimming blank boundary lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Drop blank lines hugging the region's edges.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Build the markdown list of model links belonging to `task_guide`."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Compare the list in the guide with the expected one; rewrite it when
    `overwrite` is True, otherwise raise so `make fix-copies` is run."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int):
    """Decode an audio payload to a mono float32 waveform via the `ffmpeg` CLI.

    Args:
        bpayload: raw bytes of the audio file (any container/codec ffmpeg understands).
        sampling_rate: target sampling rate for the decoded waveform.

    Returns:
        1-D ``np.float32`` array of samples.

    Raises:
        ValueError: if ffmpeg is not installed, or the payload decodes to nothing.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # force mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Yield raw audio byte chunks captured from the default microphone via ffmpeg.

    Args:
        sampling_rate: capture sampling rate.
        chunk_length_s: length (seconds) of each yielded chunk.
        format_for_conversion: ``"s16le"`` (2 bytes/sample) or ``"f32le"`` (4 bytes/sample).

    Raises:
        ValueError: on an unsupported ``format_for_conversion``.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono capture
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    # Select the capture backend/input for the current OS.
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[float] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yield overlapping microphone chunks as dicts ready for ASR pipelines.

    Each yielded item has ``raw`` (np array), ``stride`` (left/right overlap in
    samples), ``sampling_rate`` and, for partial reads, ``partial``.

    Args:
        sampling_rate: capture sampling rate.
        chunk_length_s: full chunk length in seconds.
        stream_chunk_s: if set, how often to emit partial chunks.
        stride_length_s: overlap, either a single value or (left, right);
            defaults to ``chunk_length_s / 6``.
        format_for_conversion: ``"s16le"`` or ``"f32le"``.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into windows of `chunk_len` bytes with
    (left, right) `stride` bytes of overlap between consecutive windows.

    The obfuscated original unpacked `stride` with an illegal tuple annotation
    (``a, a : Dict = stride`` — a SyntaxError); this restores the intended
    ``stride_left, stride_right`` unpack.

    Args:
        iterator: iterable of raw byte chunks.
        chunk_len: size of each emitted window in bytes.
        stride: (left, right) overlap in bytes; must sum to < chunk_len.
        stream: when True, also emit ``partial`` windows as bytes arrive.

    Yields:
        dicts with ``raw``, ``stride`` and (when `stream`) ``partial``.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # the very first window has no left context
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : int = 2**24 # 16Mo
try:
with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process:
while True:
_lowerCamelCase : Optional[Any] = ffmpeg_process.stdout.read(lowercase__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error | 96 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Imported from diffusers.utils.testing_utils above; makes the pipeline test
# runs deterministic so the hard-coded expected slices below are reproducible.
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for `KandinskyInpaintPipeline` built from tiny random components.

    Reconstructed from the obfuscated original, which used the undefined base
    ``lowercase`` (the imported ``PipelineTesterMixin``), clobbered every class
    attribute/property/method under one shared name, and carried a duplicate
    parameter name SyntaxError in ``get_dummy_inputs``.
    """

    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the full set of tiny pipeline components."""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs (image/mask/embeddings) for the pipeline."""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask: keep everything, inpaint the top-left quadrant
        # NOTE(review): quadrant slice reconstructed from the upstream test — confirm.
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the released Kandinsky 2.1 inpaint checkpoints.

    Reconstructed: the obfuscated original reused the fast-test class name
    (clobbering it) and referenced undefined placeholders for the device and
    dtype; ``torch_device`` is imported from diffusers.utils at module top.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # NOTE(review): zeroed band reconstructed from the upstream test — confirm.
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 17 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class lowerCAmelCase__ ( PretrainedConfig ):
    """Configuration for a CTRL-style model.

    Stores the hyper-parameters used to construct the model; all arguments are
    persisted as attributes and forwarded to ``PretrainedConfig``.
    """

    # NOTE: the original declared three class attributes all under one mangled
    # name, so only the last survived; these are the conventional
    # PretrainedConfig attribute names.
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        """Build the config; extra ``kwargs`` go to ``PretrainedConfig``.

        The original signature repeated one mangled parameter name (invalid
        Python) and assigned every value to a discarded local; parameters are
        restored from the attribute names the body referenced.
        """
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """Configuration for a Speech2Text2-style decoder model.

    All hyper-parameters are stored as attributes; token ids and extra kwargs
    are forwarded to ``PretrainedConfig``.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        """Build the config.

        The original signature repeated one mangled parameter name (invalid
        Python) while the body referenced the real names below; parameters and
        defaults are restored in the original positional order.
        """
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 62 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """A single node of the circular list, holding one value."""

    def __init__(self, data: Any) -> None:
        self.data = data  # payload
        self.next: Node | None = None  # successor; the tail's next wraps to the head


class CircularLinkedList:
    """Singly linked circular list: the tail node points back to the head."""

    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        """Yield every value once, starting at the head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:  # completed one full loop
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append *data* after the current tail."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend *data* before the current head."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert *data* at position *index* (0..len); raise IndexError otherwise."""
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.head = self.tail = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """Remove and return the head value."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the tail value."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the value at *index* (0..len-1); raise IndexError otherwise."""
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            # Bug fix: the original compared `index == len(self) - 1` AFTER the
            # unlink, when len() had already shrunk by one, so the condition was
            # always False and self.tail was left pointing at the removed node.
            # Update the tail whenever the removed node was the tail.
            if delete_node is self.tail:
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


# Preserve the original module-level binding for any external importers.
lowerCAmelCase__ = CircularLinkedList
def _snake_case():
    """Exercise CircularLinkedList end to end; raise AssertionError on failure.

    The original body referenced a mangled module-level name where the local
    list was meant; all references are restored to the single local below.
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    # Deleting from an empty list must raise IndexError.
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True

    # Fill with 1..5 by inserting at successive tail positions.
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Learning-rate schedule: polynomial warmup followed by a decay schedule.

    While ``step < warmup_steps`` the rate is
    ``initial_learning_rate * (step / warmup_steps) ** power``; afterwards
    ``decay_schedule_fn(step - warmup_steps)`` is used.
    The class is renamed from the mangled ``a`` because later code in this file
    references it as ``WarmUp`` (e.g. in ``from_config`` custom objects).
    """

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: Optional[str] = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        # Keras serialization hook: must return all constructor arguments.
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


# Preserve the original (shadowed) module-level binding.
a = WarmUp
def lowerCamelCase__(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Create an optimizer and matching LR schedule (warmup + polynomial decay).

    Returns ``(optimizer, lr_schedule)``. The original signature repeated one
    mangled parameter name (invalid Python); parameter names are restored from
    the names the body referenced (``init_lr``, ``num_train_steps``, ...).
    ``AdamWeightDecay`` is used when ``weight_decay_rate > 0``, plain Adam
    otherwise.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied directly to the variables.

    Renamed from the mangled ``a`` because the ``create_optimizer`` helper in
    this file instantiates it as ``AdamWeightDecay``; the undefined base
    ``__snake_case`` is replaced by the ``Adam`` class imported at the top of
    the file. Method names are restored to the Keras optimizer hook names the
    base class dispatches on (``_prepare_local``, ``_resource_apply_dense``,
    ...).
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Recreate the optimizer, registering WarmUp so nested schedules deserialize."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        # Subtract lr * var * decay_rate in place, unless the variable is excluded.
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieve the learning rate with the given state, caching coefficients."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to `param_name` (include list wins over exclude)."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


# Preserve the original (shadowed) module-level binding.
a = AdamWeightDecay
class GradientAccumulator:
    """Accumulates gradients across steps so they can be applied in one update.

    Call the instance with a list of gradients (one per variable, ``None``
    allowed) to add them into internal variables; read ``gradients`` to obtain
    the accumulated values and ``reset()`` to zero them. The original class was
    named ``a`` with an undefined base and stored its state in throwaway
    locals; state is restored onto ``self``.
    """

    def __init__(self):
        self._gradients = []       # per-variable accumulator tf.Variables (lazily created)
        self._accum_steps = None   # tf.Variable counting accumulation calls (lazily created)

    @property
    def step(self):
        """Number of accumulated steps (creates the counter variable on first use)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """Current accumulated gradient values."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Add `gradients` into the accumulators, creating them on first call."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Zero the accumulators and the step counter (no-op before first call)."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))


# Preserve the original (shadowed) module-level binding.
a = GradientAccumulator
| 183 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowercase__ = get_logger(__name__)
class lowerCAmelCase__ :
    """Mock download manager for dataset tests.

    Instead of downloading real data, it maps URLs to paths inside a local or
    remote ``dummy_data.zip`` archive. The original ``__init__`` repeated one
    mangled parameter name (invalid Python) and assigned to discarded locals;
    method names are restored from the names the bodies themselves call
    (``download_and_extract``, ``create_dummy_data_dict``, ...).
    """

    dummy_data_name = "dummy_data"
    datasets_scripts_dir = "datasets"  # referenced by local_path_to_dummy_data
    is_datasets_scripts = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # Lazily download/extract the dummy archive once per instance.
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch (and force-extract) the dummy archive; return the extracted path."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        # NOTE(review): `dummy_file_name` is referenced here and below but not
        # defined in this class — presumably provided by a subclass or mixin;
        # confirm against the rest of the project.
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Map `data_url` (str, list/tuple or dict) to path(s) inside the dummy data."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [
                    os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls
                ]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield ``(relative_name, open binary file)`` for members under *path*."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield file paths (recursing into dirs, sorted); skip '.'/'__' names."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used for `return_tensors` in these tests:
# prefer torch, then tensorflow, falling back to jax. The original bound all
# three branches to a throwaway name, while the tests below read `FRAMEWORK`.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PerceiverTokenizer
_SCREAMING_SNAKE_CASE = False
def lowercase_ ( self : Any ) ->Optional[Any]:
super().setUp()
snake_case__ : Optional[Any] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self : str ) ->Optional[Any]:
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self : int, **_snake_case : Any ) ->Optional[Any]:
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_snake_case )
def lowercase_ ( self : Tuple, _snake_case : List[Any], _snake_case : List[str]=False, _snake_case : Any=2_0, _snake_case : str=5 ) ->Any:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
snake_case__ : List[Any] = []
for i in range(len(_snake_case ) ):
try:
snake_case__ : Tuple = tokenizer.decode([i], clean_up_tokenization_spaces=_snake_case )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
snake_case__ : Optional[int] = list(filter(lambda _snake_case : re.match(R'^[ a-zA-Z]+$', t[1] ), _snake_case ) )
snake_case__ : Any = list(filter(lambda _snake_case : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_snake_case ), _snake_case ) )
if max_length is not None and len(_snake_case ) > max_length:
snake_case__ : List[str] = toks[:max_length]
if min_length is not None and len(_snake_case ) < min_length and len(_snake_case ) > 0:
while len(_snake_case ) < min_length:
snake_case__ : int = toks + toks
# toks_str = [t[1] for t in toks]
snake_case__ : str = [t[0] for t in toks]
# Ensure consistency
snake_case__ : Dict = tokenizer.decode(_snake_case, clean_up_tokenization_spaces=_snake_case )
if " " not in output_txt and len(_snake_case ) > 1:
snake_case__ : str = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_snake_case )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_snake_case )
)
if with_prefix_space:
snake_case__ : Optional[Any] = ' ' + output_txt
snake_case__ : Tuple = tokenizer.encode(_snake_case, add_special_tokens=_snake_case )
return output_txt, output_ids
def lowercase_ ( self : Optional[int] ) ->str:
snake_case__ : Optional[Any] = self.perceiver_tokenizer
snake_case__ : Dict = 'Unicode €.'
snake_case__ : int = tokenizer(_snake_case )
snake_case__ : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], _snake_case )
# decoding
snake_case__ : Optional[int] = tokenizer.decode(_snake_case )
self.assertEqual(_snake_case, '[CLS]Unicode €.[SEP]' )
snake_case__ : Union[str, Any] = tokenizer('e è é ê ë' )
snake_case__ : Tuple = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], _snake_case )
# decoding
snake_case__ : int = tokenizer.decode(_snake_case )
self.assertEqual(_snake_case, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self : Union[str, Any] ) ->int:
snake_case__ : Optional[Any] = self.perceiver_tokenizer
snake_case__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
snake_case__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
snake_case__ : Dict = tokenizer(_snake_case, padding=_snake_case, return_tensors=_snake_case )
self.assertIsInstance(_snake_case, _snake_case )
if FRAMEWORK != "jax":
snake_case__ : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
snake_case__ : Union[str, Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_snake_case, _snake_case )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def lowercase_ ( self : str ) ->List[str]:
snake_case__ : List[Any] = self.perceiver_tokenizer
snake_case__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
snake_case__ : List[str] = tokenizer(_snake_case, padding=_snake_case, return_tensors=_snake_case )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', _snake_case )
self.assertIn('attention_mask', _snake_case )
self.assertNotIn('decoder_input_ids', _snake_case )
self.assertNotIn('decoder_attention_mask', _snake_case )
def lowercase_ ( self : str ) ->Dict:
snake_case__ : str = self.perceiver_tokenizer
snake_case__ : Optional[int] = [
'Summary of the text.',
'Another summary.',
]
snake_case__ : Optional[int] = tokenizer(
text_target=_snake_case, max_length=3_2, padding='max_length', truncation=_snake_case, return_tensors=_snake_case )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
# Round-trip save/load test: a tokenizer saved with save_pretrained and reloaded
# with from_pretrained must encode identically, keep added special tokens, and
# honor a model_max_length override at load time.
# NOTE(review): mangled bindings throughout (`snake_case__` assigned, but
# `tokenizers`/`tmpdirname`/`after_tokenizer` etc. read; `_snake_case` undefined).
def lowercase_ ( self : Optional[int] ) ->Any:
# safety check on max_len default value so we are sure the test works
snake_case__ : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
snake_case__ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case__ : Optional[Any] = tempfile.mkdtemp()
snake_case__ : List[str] = ' He is very happy, UNwant\u00E9d,running'
snake_case__ : List[Any] = tokenizer.encode(_snake_case, add_special_tokens=_snake_case )
tokenizer.save_pretrained(_snake_case )
snake_case__ : Any = tokenizer.__class__.from_pretrained(_snake_case )
snake_case__ : Optional[int] = after_tokenizer.encode(_snake_case, add_special_tokens=_snake_case )
self.assertListEqual(_snake_case, _snake_case )
shutil.rmtree(_snake_case )
# Second pass: also add plain and special tokens before saving and verify
# they survive the round trip, as does a model_max_length override.
snake_case__ : Optional[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
snake_case__ : Dict = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
snake_case__ : Optional[int] = tokenizer.encode(_snake_case, add_special_tokens=_snake_case )
tokenizer.save_pretrained(_snake_case )
snake_case__ : str = tokenizer.__class__.from_pretrained(_snake_case )
snake_case__ : Any = after_tokenizer.encode(_snake_case, add_special_tokens=_snake_case )
self.assertListEqual(_snake_case, _snake_case )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
snake_case__ : List[Any] = tokenizer.__class__.from_pretrained(_snake_case, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(_snake_case )
# Verifies that `additional_special_tokens` written into special_tokens_map.json
# and tokenizer_config.json are honored by from_pretrained, and that passing
# additional_special_tokens to from_pretrained overrides the on-disk value.
# NOTE(review): mangled bindings (`snake_case__` assigned, `tokenizer_list`/
# `special_tokens_map`/`tokenizer_config`/`added_tokens_extra_ids` etc. read;
# `_snake_case` undefined) — confirm against the upstream test.
def lowercase_ ( self : Tuple ) ->Optional[int]:
snake_case__ : Optional[int] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_snake_case )
with open(os.path.join(_snake_case, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
snake_case__ : Tuple = json.load(_snake_case )
with open(os.path.join(_snake_case, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
snake_case__ : List[str] = json.load(_snake_case )
# Perceiver reserves 125 <extra_id_N> sentinel tokens
snake_case__ : Optional[int] = [F'''<extra_id_{i}>''' for i in range(1_2_5 )]
snake_case__ : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
snake_case__ : Tuple = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_snake_case, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(_snake_case, _snake_case )
with open(os.path.join(_snake_case, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(_snake_case, _snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case__ : Optional[int] = tokenizer_class.from_pretrained(
_snake_case, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case__ : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=_snake_case )]
snake_case__ : Any = tokenizer_class.from_pretrained(
_snake_case, additional_special_tokens=_snake_case, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
# Decoding an out-of-range/unknown id (178) must yield the Unicode replacement
# character rather than raising.
def lowercase_ ( self : int ) ->Any:
snake_case__ : Optional[int] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
# Intentional no-op: overrides a common-test method that does not apply to this
# tokenizer — presumably a pretokenized-input test; TODO confirm which one.
def lowercase_ ( self : str ) ->str:
pass
# Intentional no-op override of an inherited common test (not applicable here).
def lowercase_ ( self : Optional[int] ) ->Optional[Any]:
pass
# Intentional no-op override of an inherited common test (not applicable here).
def lowercase_ ( self : Optional[int] ) ->str:
pass
# Intentional no-op override of an inherited common test (not applicable here).
def lowercase_ ( self : Optional[int] ) ->Optional[int]:
pass
# convert_tokens_to_string must accept single-character tokens plus special
# tokens (the only token kinds Perceiver supports) and return a plain str.
# NOTE(review): mangled names (`snake_case__`, undefined `_snake_case`).
def lowercase_ ( self : Optional[Any] ) ->List[Any]:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
snake_case__ : List[Any] = self.get_tokenizers(fast=_snake_case, do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case__ : Dict = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_string(_snake_case )
self.assertIsInstance(_snake_case, _snake_case )
| 277 |
"""simple docstring"""
def stooge_sort(arr):
    """Sort ``arr`` in place with stooge sort and return it.

    Stooge sort is a deliberately inefficient (~O(n^2.71)) recursive exchange
    sort, kept here for educational purposes.

    >>> stooge_sort([2, 4, 5, 3, 1])
    [1, 2, 3, 4, 5]
    """
    # Bug fix: both functions were previously named `_snake_case` (the second
    # shadowed the first) while the call sites used `stooge_sort`/`stooge`;
    # names are restored so the module actually runs.
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    """Recursively stooge-sort the inclusive slice ``arr[i..h]``."""
    if i >= h:
        return
    # If the first element is larger than the last then swap them.
    # Bug fix: the original assigned both values to one temp variable, so no
    # swap ever happened; use tuple assignment.
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements, recurse on overlapping 2/3 segments.
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)
        # Recursively sort first 2/3 elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
"""simple docstring"""
import math


class Graph:
    """Dense directed graph with all-pairs shortest paths via Floyd-Warshall.

    Bug fix: the class, its methods and the driver variable were previously
    mangled (`__snake_case`, `lowerCamelCase_`, `lowercase__`) while every call
    site used ``Graph``/``add_edge``/``floyd_warshall``/``show_min``/``graph``;
    the names are restored so the module is runnable.
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight; math.inf marks "no edge"
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]

    def add_edge(self, u, v, w):
        """Add a directed edge ``u -> v`` with weight ``w``."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every pair through every intermediate node (O(n^3))."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the shortest-path distance from ``u`` to ``v`` (run floyd_warshall first)."""
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 290 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__(ProcessorMixin):
    r"""
    Processor wrapping a BLIP image processor, a language-model tokenizer and a
    Q-Former tokenizer into a single InstructBLIP-style processor.

    Bug fix: the original block was unparseable — every ``__call__`` parameter
    shared the name ``lowercase`` (a SyntaxError), the three ProcessorMixin
    class attributes and the method names were mangled into duplicates that
    shadowed each other, local bindings did not match their uses, and dataset
    junk was fused onto the last line. The standard implementation is restored;
    the class name is kept so external references still resolve.
    """

    # ProcessorMixin requires these exact attribute names.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer (not managed by ProcessorMixin's attributes)
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images=None,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_token_type_ids=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize ``text`` with both tokenizers and preprocess ``images``.

        Returns a BatchFeature with the language tokenizer's outputs, the
        Q-Former tokenizer's outputs under ``qformer_input_ids`` /
        ``qformer_attention_mask``, and the image processor's outputs.
        """
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            # Re-key the Q-Former outputs so they don't clash with the LM's.
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the language tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the language tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # dict.fromkeys de-duplicates while preserving order
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        """Save all sub-processors; the Q-Former tokenizer goes to a subfolder."""
        if os.path.isfile(save_directory):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the mixin-managed attributes plus the Q-Former tokenizer subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ :
    '''Helper that builds small BioGpt configs/inputs and runs create-and-check routines.

    NOTE(review): throughout this class, assignment targets were mangled to
    `UpperCamelCase__` while subsequent lines read the original names
    (`result`, `attn_mask`, `output_from_no_past`, ...); restore the original
    bindings before running.
    '''
# All constructor arguments were mangled to `__magic_name__`; only the first
# binds, the rest are positional model-size/test knobs — TODO restore names.
def __init__( self, __magic_name__, __magic_name__=13, __magic_name__=7, __magic_name__=True, __magic_name__=True, __magic_name__=False, __magic_name__=True, __magic_name__=99, __magic_name__=32, __magic_name__=5, __magic_name__=4, __magic_name__=37, __magic_name__="gelu", __magic_name__=0.1, __magic_name__=0.1, __magic_name__=512, __magic_name__=16, __magic_name__=2, __magic_name__=0.02, __magic_name__=3, __magic_name__=4, __magic_name__=None, ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = parent
UpperCamelCase__ : Optional[int] = batch_size
UpperCamelCase__ : Optional[Any] = seq_length
UpperCamelCase__ : Optional[int] = is_training
UpperCamelCase__ : Tuple = use_input_mask
UpperCamelCase__ : str = use_token_type_ids
UpperCamelCase__ : List[Any] = use_labels
UpperCamelCase__ : int = vocab_size
UpperCamelCase__ : str = hidden_size
UpperCamelCase__ : Tuple = num_hidden_layers
UpperCamelCase__ : Any = num_attention_heads
UpperCamelCase__ : Any = intermediate_size
UpperCamelCase__ : Dict = hidden_act
UpperCamelCase__ : str = hidden_dropout_prob
UpperCamelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCamelCase__ : List[Any] = max_position_embeddings
UpperCamelCase__ : int = type_vocab_size
UpperCamelCase__ : Dict = type_sequence_label_size
UpperCamelCase__ : Union[str, Any] = initializer_range
UpperCamelCase__ : int = num_labels
UpperCamelCase__ : Tuple = num_choices
UpperCamelCase__ : int = scope
# Builds random ids/masks/labels plus a config for every create_and_check_* method.
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCamelCase__ : Tuple = None
if self.use_input_mask:
UpperCamelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ : Any = None
if self.use_token_type_ids:
UpperCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : Dict = None
if self.use_labels:
UpperCamelCase__ : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCamelCase__ : Dict = ids_tensor([self.batch_size], self.num_choices )
UpperCamelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
# Returns a small decoder-mode BioGptConfig built from the tester's knobs.
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__magic_name__, initializer_range=self.initializer_range, )
# Forward pass through the bare BioGptModel; checks last_hidden_state shape.
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase__ : List[str] = model(__magic_name__, attention_mask=__magic_name__ )
UpperCamelCase__ : Optional[int] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
# Causal-LM head: forward with labels and check the logits shape.
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, ) -> int:
"""simple docstring"""
UpperCamelCase__ : int = BioGptForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase__ : Optional[Any] = model(__magic_name__, attention_mask=__magic_name__, token_type_ids=__magic_name__, labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
# Past-key-values consistency: the output for the next token must match with
# and without the cache when part of the sequence is masked out.
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, *__magic_name__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# create attention mask
UpperCamelCase__ : List[str] = torch.ones(input_ids.shape, dtype=torch.long, device=__magic_name__ )
UpperCamelCase__ : Optional[int] = self.seq_length // 2
UpperCamelCase__ : List[str] = 0
# first forward pass
UpperCamelCase__ : Optional[int] = model(__magic_name__, attention_mask=__magic_name__ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 1), config.vocab_size )
# change a random masked slice from input_ids
UpperCamelCase__ : Tuple = ids_tensor((1,), __magic_name__ ).item() + 1
UpperCamelCase__ : Tuple = ids_tensor((self.batch_size, 1), config.vocab_size ).squeeze(-1 )
UpperCamelCase__ : str = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCamelCase__ : List[str] = torch.cat([input_ids, next_tokens], dim=-1 )
UpperCamelCase__ : Optional[int] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=__magic_name__ )], dim=1, )
# get two different outputs
UpperCamelCase__ : str = model(__magic_name__, attention_mask=__magic_name__ )['last_hidden_state']
UpperCamelCase__ : str = model(__magic_name__, past_key_values=__magic_name__, attention_mask=__magic_name__ )['last_hidden_state']
# select random slice
UpperCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
UpperCamelCase__ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCamelCase__ : Tuple = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__, __magic_name__, atol=1E-3 ) )
# Cache consistency with multi-token continuation (large-inputs variant).
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, *__magic_name__ ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : int = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval()
UpperCamelCase__ : List[str] = torch.ones(input_ids.shape, dtype=torch.long, device=__magic_name__ )
# first forward pass
UpperCamelCase__ : List[Any] = model(__magic_name__, attention_mask=__magic_name__, use_cache=__magic_name__ )
UpperCamelCase__ : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
UpperCamelCase__ : Optional[int] = ids_tensor((self.batch_size, 3), 2 )
# append to next input_ids and
UpperCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
UpperCamelCase__ : Optional[Any] = torch.cat([attention_mask, next_attn_mask], dim=-1 )
UpperCamelCase__ : List[Any] = model(__magic_name__, attention_mask=__magic_name__ )['last_hidden_state']
UpperCamelCase__ : Dict = model(__magic_name__, attention_mask=__magic_name__, past_key_values=__magic_name__ )[
'last_hidden_state'
]
# select random slice
UpperCamelCase__ : Optional[Any] = ids_tensor((1,), output_from_past.shape[-1] ).item()
UpperCamelCase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__, __magic_name__, atol=1E-3 ) )
# Trains one step (optionally with gradient checkpointing) and checks shapes.
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, *__magic_name__, __magic_name__=False ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : List[str] = BioGptForCausalLM(__magic_name__ )
model.to(__magic_name__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCamelCase__ : List[Any] = model(__magic_name__, labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
# GPT-2-style scaled init: c_proj weights must have std ~ init_range/sqrt(2*layers).
def UpperCamelCase__ ( self, __magic_name__, *__magic_name__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = BioGptModel(__magic_name__ )
UpperCamelCase__ : Optional[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ), 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ), 0.01 )
# Token-classification head: logits must be (batch, seq, num_labels).
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, *__magic_name__ ) -> str:
"""simple docstring"""
UpperCamelCase__ : Dict = self.num_labels
UpperCamelCase__ : int = BioGptForTokenClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase__ : List[Any] = model(__magic_name__, attention_mask=__magic_name__, token_type_ids=__magic_name__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
# Adapter for the common ModelTesterMixin machinery.
# NOTE(review): the tuple-unpacking targets below were collapsed to a single
# `UpperCamelCase__`, yet `input_ids`/`input_mask` are read afterwards —
# restore `(config, input_ids, token_type_ids, input_mask, ...)`.
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : str = self.prepare_config_and_inputs()
(
UpperCamelCase__
) : Dict = config_and_inputs
UpperCamelCase__ : str = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase__ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    '''Common + pipeline test-suite entry point for the BioGpt model family.

    NOTE(review): the four class attributes below were all mangled to `a`, so
    each assignment shadows the previous one; they were presumably
    `all_model_classes`, `all_generative_model_classes`, `pipeline_model_mapping`
    and `test_pruning` — restore before the mixins can pick them up.
    '''
a : Dict = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
a : Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
a : List[Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
a : List[Any] = False
# setUp: build the model tester and a ConfigTester for BioGptConfig.
# NOTE(review): assignments bind `UpperCamelCase__` but the tests read
# `self.model_tester`/`self.config_tester` — restore the attribute names.
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Tuple = BioGptModelTester(self )
UpperCamelCase__ : Optional[Any] = ConfigTester(self, config_class=__magic_name__, hidden_size=37 )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
# Re-runs the model check for each position-embedding type.
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ : Tuple = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__magic_name__, gradient_checkpointing=__magic_name__ )
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ )
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ )
# Slow integration test: batched generation with left padding must match
# per-sentence (unpadded) generation.
@slow
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
UpperCamelCase__ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCamelCase__ : Optional[int] = 'left'
# Define PAD Token = EOS Token = 50256
UpperCamelCase__ : Tuple = tokenizer.eos_token
UpperCamelCase__ : Optional[int] = model.config.eos_token_id
# use different length sentences to test batching
UpperCamelCase__ : Union[str, Any] = [
'Hello, my dog is a little',
'Today, I',
]
UpperCamelCase__ : Optional[Any] = tokenizer(__magic_name__, return_tensors='''pt''', padding=__magic_name__ )
UpperCamelCase__ : Dict = inputs['input_ids'].to(__magic_name__ )
UpperCamelCase__ : Union[str, Any] = model.generate(
input_ids=__magic_name__, attention_mask=inputs['''attention_mask'''].to(__magic_name__ ), )
UpperCamelCase__ : Union[str, Any] = tokenizer(sentences[0], return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCamelCase__ : List[Any] = model.generate(input_ids=__magic_name__ )
UpperCamelCase__ : List[str] = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
UpperCamelCase__ : Any = tokenizer(sentences[1], return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCamelCase__ : List[str] = model.generate(input_ids=__magic_name__, max_length=model.config.max_length - num_paddings )
UpperCamelCase__ : int = tokenizer.batch_decode(__magic_name__, skip_special_tokens=__magic_name__ )
UpperCamelCase__ : List[Any] = tokenizer.decode(output_non_padded[0], skip_special_tokens=__magic_name__ )
UpperCamelCase__ : int = tokenizer.decode(output_padded[0], skip_special_tokens=__magic_name__ )
UpperCamelCase__ : str = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(__magic_name__, __magic_name__ )
self.assertListEqual(__magic_name__, [non_padded_sentence, padded_sentence] )
# Slow: smoke-test loading each published checkpoint.
@slow
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : List[Any] = BioGptModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
# Single-label sequence classification: logits must be (batch, num_labels).
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Optional[int] = input_dict['input_ids']
UpperCamelCase__ : Tuple = input_ids.ne(1 ).to(__magic_name__ )
UpperCamelCase__ : Any = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
UpperCamelCase__ : Tuple = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase__ : str = model(__magic_name__, attention_mask=__magic_name__, labels=__magic_name__ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
# Multi-label sequence classification variant (float label matrix).
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : int = 3
UpperCamelCase__ : List[Any] = 'multi_label_classification'
UpperCamelCase__ : Tuple = input_dict['input_ids']
UpperCamelCase__ : List[Any] = input_ids.ne(1 ).to(__magic_name__ )
UpperCamelCase__ : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase__ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase__ : List[str] = model(__magic_name__, attention_mask=__magic_name__, labels=__magic_name__ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowercase__ ( unittest.TestCase ):
    '''Slow integration tests against the published microsoft/biogpt checkpoint.

    NOTE(review): assignment targets are mangled to `UpperCamelCase__` while
    later lines read `output`/`output_ids` etc. — restore before running.
    '''
# Checks raw LM-head logits (shape and a 3x3 slice) for a fixed input.
@slow
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
UpperCamelCase__ : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
UpperCamelCase__ : List[str] = model(__magic_name__ )[0]
UpperCamelCase__ : str = 42384
UpperCamelCase__ : int = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape, __magic_name__ )
UpperCamelCase__ : str = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], __magic_name__, atol=1E-4 ) )
# Checks deterministic (seeded) beam-search generation against a golden string.
@slow
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCamelCase__ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
torch.manual_seed(0 )
UpperCamelCase__ : Union[str, Any] = tokenizer('''COVID-19 is''', return_tensors='''pt''' ).to(__magic_name__ )
UpperCamelCase__ : int = model.generate(
**__magic_name__, min_length=100, max_length=1024, num_beams=5, early_stopping=__magic_name__, )
UpperCamelCase__ : Any = tokenizer.decode(output_ids[0], skip_special_tokens=__magic_name__ )
UpperCamelCase__ : List[str] = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(__magic_name__, __magic_name__ )
| 201 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : List[Any] = os.path.join(lowercase__ , 'all_results.json' )
if os.path.exists(lowercase__ ):
with open(lowercase__ , 'r' ) as f:
_lowerCamelCase : List[Any] = json.load(lowercase__ )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase__ ( lowercase ):
    """TPU smoke tests that spawn example scripts across 8 cores via xla_spawn.

    Bug fixes: both test methods were mangled to the same name ``A_`` (the
    second silently shadowed the first), ``patch.object`` targeted the
    undefined name ``lowercase`` instead of ``sys``, the locals referenced in
    the f-string (``tmp_dir``) and assertions (``result``) were never bound,
    and dataset junk was fused onto the last line.
    """

    def test_run_glue ( self ):
        """Fine-tune run_glue.py on the MRPC fixtures over 8 TPU cores and check
        that eval accuracy reaches 0.75 within 500 seconds of wall time."""
        import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            '''.split()
        with patch.object(sys, 'argv', testargs):
            start = time()
            xla_spawn.main()
            end = time()
            # _snake_case is this module's all_results.json loader
            result = _snake_case(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500 )

    def test_trainer_tpu ( self ):
        """Run tests/test_trainer_tpu.py itself across 8 TPU cores."""
        import xla_spawn
        testargs = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(sys, 'argv', testargs):
            xla_spawn.main()
def UpperCamelCase_( _snake_case: str ) -> int:
    """Convert an Excel-style column title (e.g. ``"AB"``) to its 1-based number.

    ``"A"`` -> 1, ``"Z"`` -> 26, ``"AB"`` -> 28 (base-26 with digits 'A'..'Z').

    Raises:
        AssertionError: if the title contains lowercase letters.
    """
    # BUG FIX: the body referenced an undefined ``column_title``; bind it to
    # the actual parameter.  (``assert`` kept for interface compatibility,
    # although it is stripped under ``python -O``.)
    column_title = _snake_case
    assert column_title.isupper()
    answer = 0
    # Walk from the least-significant (rightmost) letter: each position is a
    # base-26 digit where 'A' == 1 ... 'Z' == 26.
    for power, letter in enumerate(reversed(column_title)):
        answer += (ord(letter) - 64) * 26**power
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 218 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _snake_case ( lowercase__ , lowercase__ ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowercase__ , lowercase__ ) ) )
def _snake_case ( lowercase__ , lowercase__ ):
if dataset.ndim != value_array.ndim:
_lowerCamelCase : Tuple = (
'Wrong input data\'s dimensions... '
f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(lowercase__ )
try:
if dataset.shape[1] != value_array.shape[1]:
_lowerCamelCase : Optional[int] = (
'Wrong input data\'s shape... '
f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(lowercase__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
_lowerCamelCase : int = (
'Input data have different datatype... '
f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(lowercase__ )
_lowerCamelCase : Optional[int] = []
for value in value_array:
_lowerCamelCase : Tuple = euclidean(lowercase__ , dataset[0] )
_lowerCamelCase : Union[str, Any] = dataset[0].tolist()
for dataset_value in dataset[1:]:
_lowerCamelCase : Optional[Any] = euclidean(lowercase__ , lowercase__ )
if dist > temp_dist:
_lowerCamelCase : List[Any] = temp_dist
_lowerCamelCase : List[str] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _snake_case ( lowercase__ , lowercase__ ):
return np.dot(lowercase__ , lowercase__ ) / (norm(lowercase__ ) * norm(lowercase__ ))
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def A ( saved_model_path , strict , opset ):
    """Verify every op used by a TF SavedModel is convertible at an ONNX opset.

    Args:
        saved_model_path: path to the saved-model ``.pb`` file.
        strict: if True, raise on incompatible ops instead of just printing.
        opset: ONNX opset version to validate against.

    Raises:
        Exception: in strict mode, when incompatible ops are found.

    BUG FIXES: the original repeated one mangled parameter name three times
    (SyntaxError), referenced undefined names for the loop index and the
    internal-ops list, and concatenated a ``str`` with a ``list`` when
    raising.
    """
    saved_model = SavedModel()
    onnx_ops = []
    # Internal TensorFlow ops that belong to the saved-model machinery and
    # never need an ONNX equivalent (kept local: the module-level constant's
    # name is clobbered by later reassignments).
    internal_ops = [
        "Assert",
        "AssignVariableOp",
        "EmptyTensorList",
        "MergeV2Checkpoints",
        "ReadVariableOp",
        "ResourceGather",
        "RestoreV2",
        "SaveV2",
        "ShardedFilename",
        "StatefulPartitionedCall",
        "StaticRegexFullMatch",
        "VarHandleOp",
    ]
    repo_path = "."  # this script is meant to be run from the repo root
    with open(os.path.join(repo_path, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']
    # Ops supported at ``opset`` are the union of all opsets up to it.
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    model_op_names = sorted(model_op_names)
    incompatible_ops = [op for op in model_op_names if op not in onnx_ops and op not in internal_ops]
    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep='\n')
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    # Command-line entry point: validate a saved model against an ONNX opset.
    # BUG FIX: the parser/args were assigned to a clobbered module name and
    # the final call targeted an undefined ``onnx_compliancy``; the checker
    # defined above in this file is ``A``.
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()
    if args.framework == "onnx":
        A(args.saved_model_path, args.strict, args.opset)
"""simple docstring"""
import socket
def _snake_case ( ):
    """Toy TCP client: connect to ``<hostname>:12312``, send a greeting, and
    stream whatever the server sends back into ``Received_file``.

    NOTE(review): the three locals below all share one mangled name, so the
    ``sock``/``host``/``port``/``data``/``lowercase__`` references that follow
    are undefined in this scope — restore the intended names before running.
    """
    _lowerCamelCase : List[Any] = socket.socket(socket.AF_INET , socket.SOCK_STREAM )  # TCP socket
    _lowerCamelCase : Union[str, Any] = socket.gethostname()  # connect back to this machine
    _lowerCamelCase : List[Any] = 12312  # server port
    sock.connect((host, port) )
    sock.send(B'Hello server!' )
    # Receive the file in 1 KiB chunks until the server closes the connection.
    with open('Received_file' , 'wb' ) as out_file:
        print('File opened' )
        print('Receiving data...' )
        while True:
            _lowerCamelCase : int = sock.recv(1024 )
            if not data:
                break
            out_file.write(lowercase__ )
    print('Successfully received the file' )
    sock.close()
    print('Connection closed' )
if __name__ == "__main__":
    # BUG FIX: this guard previously called an undefined ``main()``; the
    # client routine defined above in this module is ``_snake_case``.
    _snake_case()
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase ):
    """Return the value unchanged if it is iterable, else the pair ``(x, x)``.

    Used to normalize image/patch sizes that may be given as a single int.

    BUG FIX: the body previously tested an undefined name instead of the
    parameter.
    """
    if isinstance(_UpperCAmelCase, collections.abc.Iterable):
        return _UpperCAmelCase
    return (_UpperCAmelCase, _UpperCAmelCase)
@require_tf
class A__ :
    """Shared checks for TF VisionTextDualEncoder model tests.

    Subclasses provide ``get_vision_text_model``, ``prepare_config_and_inputs``
    and ``get_pretrained_model_and_inputs``.

    BUG FIX: every method had been mangled to one repeated name with duplicate
    parameter names (a SyntaxError); the names are restored to match the call
    sites already present in the bodies below.
    """

    def get_vision_text_model(self, vision_config, text_config):
        """Return ``(vision_model, text_model)`` built from the given configs."""
        pass

    def prepare_config_and_inputs(self):
        """Return the kwargs dict consumed by the ``check_*`` helpers."""
        pass

    def get_pretrained_model_and_inputs(self):
        """Return ``(model, inputs)`` for the real-checkpoint round-trip test."""
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        """Build the dual encoder from a combined config and check embed shapes."""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        """Build the dual encoder from two sub-models and check embed shapes."""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        """Same as above, but going through ``from_vision_text_pretrained``."""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        """Round-trip through save_pretrained/from_pretrained, compare outputs."""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        """Check attention tensor shapes with ``output_attentions=True``."""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        # ``SCREAMING_SNAKE_CASE_`` is the to-2-tuple helper defined above.
        image_size = SCREAMING_SNAKE_CASE_(vision_model.config.image_size)
        patch_size = SCREAMING_SNAKE_CASE_(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a, b, tol):
        """Assert the max absolute difference between two arrays is <= tol."""
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
    """Dual-encoder tests with a ViT vision tower and a BERT text tower.

    NOTE(review): the base ``UpperCAmelCase__`` is presumably the mixin class
    defined above (its name was mangled) — verify.  Method names restored to
    those the mixin calls; the tuple unpackings had been collapsed, leaving
    every name in the returned dict undefined.
    """

    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        # Unpacking order follows each tester's ``prepare_config_and_inputs``
        # return value — TODO confirm against the imported testers.
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
    """Dual-encoder tests with a DeiT vision tower and a RoBERTa text tower.

    NOTE(review): the base ``UpperCAmelCase__`` is presumably the mixin class
    defined above (its name was mangled) — verify.  Method names restored to
    those the mixin calls; collapsed tuple unpackings restored.
    """

    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        """Override: DeiT sequences carry [CLS] + distillation tokens (+2)."""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = SCREAMING_SNAKE_CASE_(vision_model.config.image_size)
        patch_size = SCREAMING_SNAKE_CASE_(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        # Unpacking order follows each tester's ``prepare_config_and_inputs``
        # return value — TODO confirm against the imported testers.
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
    """Dual-encoder tests with a CLIP vision tower and a BERT text tower.

    NOTE(review): the base ``UpperCAmelCase__`` is presumably the mixin class
    defined above (its name was mangled) — verify.  Method names restored to
    those the mixin calls; collapsed tuple unpackings restored.
    """

    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        # CLIP's vision tester returns only (config, pixel_values) — TODO
        # confirm against the imported tester.
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
    """Slow integration test running the real ``clip-italian`` checkpoint."""

    @slow
    def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
        """Run a COCO image and two Italian captions through the model and
        compare the image-text logits against reference values."""
        # NOTE(review): ``from_pt=SCREAMING_SNAKE_CASE`` passes the module-level
        # helper function (always truthy); the original presumably passed
        # ``from_pt=True``.  The ``processor``/``inputs``/``outputs``/``model``
        # references below also do not match the names assigned above — the
        # local identifiers look mangled; verify before relying on this test.
        _a : List[str] =TFVisionTextDualEncoderModel.from_pretrained(
            """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=SCREAMING_SNAKE_CASE )
        _a : str =VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
        _a : Optional[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        _a : List[Any] =processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors="""np""" )
        _a : List[Any] =model(**SCREAMING_SNAKE_CASE )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        _a : int =np.array([[1.2_284_727, 0.3_104_122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
| 276 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowercase__ = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
lowercase__ = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
lowercase__ = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def _snake_case ( lowercase__ , lowercase__ ):
return float((preds == labels).mean() )
def _snake_case ( preds , labels , fa_avg="binary" ):
    """Return ``{"accuracy", "f1"}`` for the given predictions/references.

    ``fa_avg`` is forwarded to sklearn's ``f1_score`` (the class below calls
    this with ``fa_avg='macro'``).

    BUG FIX: the original repeated one mangled parameter name (SyntaxError)
    and referenced undefined names in the body.
    """
    # Accuracy inlined: the module-level helpers all share one mangled name,
    # so the simple-accuracy function cannot be called by name here.
    acc = float((preds == labels).mean())
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def _snake_case ( ids_preds , labels ):
    """MultiRC scorer: exact-match, per-question macro-F1 and answer-level F1.

    Args:
        ids_preds: list of dicts with ``idx`` ({'paragraph','question','answer'})
            and ``prediction`` (0/1).
        labels: gold 0/1 labels, aligned with ``ids_preds``.

    Returns:
        ``{"exact_match", "f1_m", "f1_a"}``.

    BUG FIX: the original had duplicate parameter names (SyntaxError) and
    referenced undefined names throughout the body.
    """
    # Group (prediction, label) pairs by their paragraph-question id.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average='macro')
        fas.append(fa)
        # Exact match: every answer of the question predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred['prediction'] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
    """SuperGLUE metric, dispatching on ``self.config_name`` (the sub-task).

    BUG FIX: the three methods had been mangled to one repeated name, breaking
    the ``datasets.Metric`` contract; restored to ``_info``/``_compute`` and
    the ``_get_feature_types`` helper that ``_info`` already referenced —
    TODO confirm against the ``datasets`` API version in use.
    """

    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            # 'record' and 'multirc' use nested dict features, which the numpy
            # format cannot represent.
            format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None,
        )

    def _get_feature_types(self):
        """Return the input feature schema for the configured sub-task."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value('int64'),
                        "query": datasets.Value('int64'),
                    },
                    "prediction_text": datasets.Value('string'),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value('int64'),
                        "query": datasets.Value('int64'),
                    },
                    "answers": datasets.Sequence(datasets.Value('string')),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value('int64'),
                        "paragraph": datasets.Value('int64'),
                        "question": datasets.Value('int64'),
                    },
                    "prediction": datasets.Value('int64'),
                },
                "references": datasets.Value('int64'),
            }
        else:
            return {
                "predictions": datasets.Value('int64'),
                "references": datasets.Value('int64'),
            }

    def _compute(self, predictions, references):
        """Score ``predictions`` against ``references`` for the configured task."""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            # Accuracy + macro-F1 inlined: the module-level helpers all share
            # one mangled name and cannot be called individually.
            acc = float((predictions == references).mean())
            fa = float(fa_score(y_true=references, y_pred=predictions, average='macro'))
            return {"accuracy": acc, "f1": fa}
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions_map = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset, predictions_map)[0]
        elif self.config_name == "multirc":
            # ``_snake_case`` is the module-level MultiRC scorer (the last of
            # the mangled helper definitions above).
            return _snake_case(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": float((predictions == references).mean())}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
    """Builds SwiftFormer configs/inputs for the model tests.

    BUG FIX: ``__init__`` repeated one mangled parameter name (SyntaxError)
    and dropped the ``self.*`` assignments; both restored from the attributes
    the other methods read.  Method names restored to the ones called at
    ``self.get_config()`` / ``self.prepare_config_and_inputs()`` below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        # Mutable list defaults kept for interface compatibility; they are
        # only read, never mutated, by this class.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)``; labels only if use_labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act='''gelu''',
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,  # was a mangled undefined name — TODO confirm
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        # Also check the unlabeled forward path on fresh inputs.
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        # Restore the collapsed tuple unpacking so the dict below is defined.
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
    """Common model tests for SwiftFormer (shape, signature, init, hidden states).

    NOTE(review): this class is damaged by the auto-renaming pass:
    - the bases `_lowerCamelCase` and the tester class `SwiftFormerModelTester`
      do not exist in this file (the tester above was renamed to `a`);
    - `lowercase_` is referenced in several methods without ever being bound;
    - every method is named `A_`, so later defs shadow earlier ones and
      unittest will not discover any of them as tests.
    Code left byte-identical pending a coordinated fix.
    """

    # NOTE(review): these five identically named attributes shadow one another;
    # upstream they are all_model_classes, pipeline_model_mapping and the
    # fx/torchscript/attention/head-masking test switches.
    snake_case_ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    snake_case_ = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    def A_ ( self : str ):
        # NOTE(review): `SwiftFormerModelTester` and `lowercase_` are undefined here.
        snake_case_ = SwiftFormerModelTester(self )
        snake_case_ = ConfigTester(
            self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
    def A_ ( self : Union[str, Any] ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
    def A_ ( self : Tuple ):
        pass
    def A_ ( self : Optional[int] ):
        # Output embeddings should be absent or a plain Linear head.
        snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ = model_class(lowercase_ )
            snake_case_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
    def A_ ( self : List[str] ):
        # The forward signature must start with `pixel_values`.
        snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ = model_class(lowercase_ )
            snake_case_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ = [*signature.parameters.keys()]
            snake_case_ = ['pixel_values']
            self.assertListEqual(arg_names[:1] , lowercase_ )
    def A_ ( self : Optional[Any] ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )
    def A_ ( self : Tuple ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_ )
    @slow
    def A_ ( self : Optional[Any] ):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ = SwiftFormerModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )
    @unittest.skip(reason='''SwiftFormer does not output attentions''' )
    def A_ ( self : Union[str, Any] ):
        pass
    def A_ ( self : List[str] ):
        # NOTE(review): the nested helper declares three parameters with the
        # same name `lowercase_`, which is a SyntaxError in Python.
        def check_hidden_states_output(lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] ):
            snake_case_ = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            with torch.no_grad():
                snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
            snake_case_ = outputs.hidden_states
            # SwiftFormer emits 8 hidden states for the default 4-stage config.
            snake_case_ = 8
            self.assertEqual(len(lowercase_ ) , lowercase_ ) # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(lowercase_ ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )
        snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            snake_case_ = True
            check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
    def A_ ( self : str ):
        # Zero-init every *_range/*_std/layer_scale field (recursively), then
        # check each trainable parameter initializes to exactly 0 or 1.
        def _config_zero_init(lowercase_ : str ):
            snake_case_ = copy.deepcopy(lowercase_ )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(lowercase_ , lowercase_ , 1e-10 )
                if isinstance(getattr(lowercase_ , lowercase_ , lowercase_ ) , lowercase_ ):
                    snake_case_ = _config_zero_init(getattr(lowercase_ , lowercase_ ) )
                    setattr(lowercase_ , lowercase_ , lowercase_ )
            return configs_no_init
        snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ = _config_zero_init(lowercase_ )
        for model_class in self.all_model_classes:
            snake_case_ = model_class(config=lowercase_ )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def A_ ( self : int ):
        pass
def __magic_name__():
    """Load the standard COCO cats fixture image used by the integration tests.

    NOTE(review): the previous ``-> List[Any]`` annotation was wrong (a PIL
    Image is returned) and referenced a possibly-unimported typing name; removed.
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_torch
@require_vision
class a ( unittest.TestCase ):
    """Integration test: run the released MBZUAI/swiftformer-xs checkpoint on a COCO image."""

    @cached_property
    def default_image_processor(self):
        # Name restored: the test below reads `self.default_image_processor`.
        # ViT preprocessing matches the released checkpoint; None without vision extras.
        return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(torch_device)
        image_processor = self.default_image_processor
        # `__magic_name__` is this file's (auto-renamed) COCO test-image loader.
        image = __magic_name__()
        inputs = image_processor(images=image, return_tensors='''pt''' ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits shape and the first three class logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1_703e00, 2.1_107e00, -2.0_811e00]] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 56 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/cuDNN fully deterministic so the hard-coded image slices below reproduce.
enable_full_determinism()
class lowerCAmelCase__ ( PipelineTesterMixin, unittest.TestCase ):
    """Fast CPU tests for the unconditional DDIM pipeline.

    NOTE(review): the base was a dangling name `lowercase`; `PipelineTesterMixin`
    is what the body references and what is imported above. Class attributes and
    methods were all auto-renamed to one identifier; restored to the names the
    mixin machinery expects.
    """

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the trailing `False` most plausibly maps to `test_cpu_offload`
    # — confirm against the upstream diffusers test.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny UNet + DDIM scheduler pair suitable for fast tests."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        scheduler = DDIMScheduler()
        return {'unet': unet, 'scheduler': scheduler}

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic generator plus minimal call kwargs for `device`."""
        if str(device).startswith('mps'):
            # MPS does not support device-local generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3E-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests against the released google/ddpm checkpoints."""

    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_ema_bedroom(self):
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
'''simple docstring'''


def lowercase__(__UpperCamelCase):
    """Return the largest number obtainable by deleting exactly one digit.

    The sign of the input is ignored (``abs`` is applied first).

    Raises:
        TypeError: if the input is not an ``int``.

    Note: a single-digit input ends up calling ``int('')`` and raises
    ValueError, matching the original list-pop implementation.
    """
    if not isinstance(__UpperCamelCase, int):
        raise TypeError("""only integers accepted as input""")
    digits = str(abs(__UpperCamelCase))
    # Drop the digit at each position once, then keep the best remainder.
    candidates = (digits[:index] + digits[index + 1 :] for index in range(len(digits)))
    return max(int(candidate) for candidate in candidates)


if __name__ == "__main__":
    __import__('doctest').testmod()
| 321 |
"""simple docstring"""
# Imports
import numpy as np
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None ):
self.set_matricies(red=lowercase , green=lowercase , blue=lowercase , red_edge=lowercase , nir=lowercase )
def A_ ( self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None ):
if red is not None:
_lowerCamelCase : Optional[int] = red
if green is not None:
_lowerCamelCase : Optional[Any] = green
if blue is not None:
_lowerCamelCase : Tuple = blue
if red_edge is not None:
_lowerCamelCase : Optional[Any] = red_edge
if nir is not None:
_lowerCamelCase : Union[str, Any] = nir
return True
def A_ ( self , lowercase="" , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None ):
self.set_matricies(red=lowercase , green=lowercase , blue=lowercase , red_edge=lowercase , nir=lowercase )
_lowerCamelCase : str = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def A_ ( self ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def A_ ( self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def A_ ( self ):
return self.nir * (self.red / (self.green**2))
def A_ ( self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def A_ ( self ):
return (self.nir - self.red) / (self.nir + self.red)
def A_ ( self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def A_ ( self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def A_ ( self ):
return (self.nir - self.green) / (self.nir + self.green)
def A_ ( self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def A_ ( self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def A_ ( self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def A_ ( self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def A_ ( self , lowercase=0.08 , lowercase=1.22 , lowercase=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def A_ ( self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def A_ ( self ):
return (self.nir / self.green) - 1
def A_ ( self ):
return (self.nir / self.redEdge) - 1
def A_ ( self ):
return (self.red - self.blue) / self.red
def A_ ( self ):
_lowerCamelCase : Any = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def A_ ( self ):
return self.nir - self.green
def A_ ( self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def A_ ( self ):
_lowerCamelCase : Any = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def A_ ( self , lowercase=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def A_ ( self , lowercase=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def A_ ( self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def A_ ( self , lowercase=None , lowercase=None ):
return (self.nir - b) / (a * self.red)
def A_ ( self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def A_ ( self ):
return (self.red + self.green + self.blue) / 30.5
def A_ ( self ):
return self.nir / self.red
def A_ ( self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def A_ ( self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def A_ ( self ):
return self.green / (self.nir + self.red + self.green)
def A_ ( self ):
return self.nir / (self.nir + self.red + self.green)
def A_ ( self ):
return self.red / (self.nir + self.red + self.green)
def A_ ( self ):
return (self.green - self.red) / (self.green + self.red)
def A_ ( self ):
return (self.red - self.green) / (self.red + self.green)
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
_lowerCamelCase : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def A_ ( self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def A_ ( self ):
return self.nir / self.red
def A_ ( self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def A_ ( self ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge) | 96 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 17 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
# Module-level logger (auto-generated variable name kept for compatibility).
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( CLIPPreTrainedModel ):
    """Paint-by-Example image encoder: wraps a CLIP vision tower and maps its
    pooled output into the UNet cross-attention space.

    NOTE(review): the base was the dangling name `lowercase`;
    `CLIPPreTrainedModel` is imported above and matches the diffusers original.
    The garbled `__init__` declared duplicate parameter names and never
    assigned its submodules to `self`.
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # Learned unconditional embedding, used for classifier-free guidance scaling.
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper ( nn.Module ):
    """Stack of self-attention transformer blocks applied to the pooled CLIP embedding.

    NOTE(review): renamed from the auto-generated `lowerCAmelCase__` (which
    collided with the encoder class above) so that the encoder's reference to
    `PaintByExampleMapper` resolves.
    """

    def __init__(self, config):
        super().__init__()
        # One block per ~5 hidden layers of the vision tower, as in diffusers.
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        # Fixed: the garbled loop discarded each block's output and returned an
        # undefined name; thread the activations through every block instead.
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    """Checksum a state dict by summing all parameter values.

    NOTE(review): the previous `: List[Any]` annotation referenced an
    unimported typing name and would raise NameError at definition time; removed.
    """
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in SCREAMING_SNAKE_CASE__.items() )
def _UpperCAmelCase ( state_dict , codebook_state_dict ):
    """Rename original FLAVA checkpoint keys to the transformers FlavaForPreTraining layout.

    NOTE(review): the garbled version declared two parameters with the same
    name (a SyntaxError) and never collected the renamed entries; it now builds
    and returns the upgraded dict, with codebook weights under `image_codebook.`
    as in the upstream converter.
    """
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
        key = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
        key = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
        key = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
        key = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
        key = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
        key = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
        key = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
        key = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
        key = key.replace('image_encoder.module' , 'flava.image_model' )
        key = key.replace('text_encoder.module' , 'flava.text_model' )
        key = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
        key = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
        key = key.replace('text_projection' , 'flava.text_projection' )
        key = key.replace('image_projection' , 'flava.image_projection' )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f'image_codebook.{key}'] = value
    return upgrade
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=None ):
    """Convert an original FLAVA checkpoint (+DALL-E codebook) into FlavaForPreTraining.

    NOTE(review): this function is broken by the auto-renaming pass and left
    byte-identical pending a coordinated fix:
    - all four parameters share one name (a SyntaxError in Python); upstream
      they are (checkpoint_path, codebook_path, pytorch_dump_folder_path,
      config_path=None);
    - `config_path`, `hf_model`, and every `lowercase__` use are unbound;
    - `upgrade_state_dict` / `count_parameters` refer to the two functions
      above, which are both (mis)named `_UpperCAmelCase` in this file.
    """
    if config_path is not None:
        __UpperCamelCase =FlavaConfig.from_pretrained(lowercase__ )
    else:
        __UpperCamelCase =FlavaConfig()
    __UpperCamelCase =FlavaForPreTraining(lowercase__ ).eval()
    # Codebook is converted via the DALL-E converter without saving to disk.
    __UpperCamelCase =convert_dalle_checkpoint(lowercase__ , lowercase__ , save_checkpoint=lowercase__ )
    if os.path.exists(lowercase__ ):
        __UpperCamelCase =torch.load(lowercase__ , map_location='cpu' )
    else:
        __UpperCamelCase =torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )
    __UpperCamelCase =upgrade_state_dict(lowercase__ , lowercase__ )
    hf_model.load_state_dict(lowercase__ )
    __UpperCamelCase =hf_model.state_dict()
    __UpperCamelCase =count_parameters(lowercase__ )
    __UpperCamelCase =count_parameters(lowercase__ ) + count_parameters(lowercase__ )
    # Sanity check: total parameter mass must match between source and target.
    assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
    hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_A = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 62 |
"""simple docstring"""
lowercase__ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
lowercase__ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : List[Any] = from_type.lower().strip('s' )
_lowerCamelCase : List[Any] = to_type.lower().strip('s' )
_lowerCamelCase : Optional[int] = UNIT_SYMBOL.get(lowercase__ , lowercase__ )
_lowerCamelCase : Any = UNIT_SYMBOL.get(lowercase__ , lowercase__ )
if from_sanitized not in METRIC_CONVERSION:
_lowerCamelCase : Tuple = (
f'''Invalid \'from_type\' value: {from_type!r}.\n'''
f'''Conversion abbreviations are: {', '.join(lowercase__ )}'''
)
raise ValueError(lowercase__ )
if to_sanitized not in METRIC_CONVERSION:
_lowerCamelCase : Any = (
f'''Invalid \'to_type\' value: {to_type!r}.\n'''
f'''Conversion abbreviations are: {', '.join(lowercase__ )}'''
)
raise ValueError(lowercase__ )
_lowerCamelCase : List[Any] = METRIC_CONVERSION[from_sanitized]
_lowerCamelCase : int = METRIC_CONVERSION[to_sanitized]
_lowerCamelCase : List[str] = 1
if from_exponent > to_exponent:
_lowerCamelCase : List[str] = from_exponent - to_exponent
else:
_lowerCamelCase : List[Any] = -(to_exponent - from_exponent)
return value * pow(10 , lowercase__ )
if __name__ == "__main__":
from doctest import testmod
testmod() | 96 | 0 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
_SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
_SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
_SCREAMING_SNAKE_CASE : Dict = ''''''
_SCREAMING_SNAKE_CASE : List[Any] = ''''''
def lowerCamelCase__ ( _lowerCamelCase : Any ) -> Union[str, Any]:
# authorize twitter, initialize tweepy
lowerCamelCase_ = tweepy.OAuthHandler(lowercase__ , lowercase__ )
auth.set_access_token(lowercase__ , lowercase__ )
lowerCamelCase_ = tweepy.API(lowercase__ )
# initialize a list to hold all the tweepy Tweets
lowerCamelCase_ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowerCamelCase_ = api.user_timeline(screen_name=lowercase__ , count=200 )
# save most recent tweets
alltweets.extend(lowercase__ )
# save the id of the oldest tweet less one
lowerCamelCase_ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase__ ) > 0:
print(F'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
lowerCamelCase_ = api.user_timeline(
screen_name=lowercase__ , count=200 , max_id=lowercase__ )
# save most recent tweets
alltweets.extend(lowercase__ )
# update the id of the oldest tweet less one
lowerCamelCase_ = alltweets[-1].id - 1
print(F'''...{len(lowercase__ )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
lowerCamelCase_ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''' , 'w' ) as f:
lowerCamelCase_ = csv.writer(lowercase__ )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowercase__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 183 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 96 | 0 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class snake_case__ :
"""simple docstring"""
def __init__( self : Tuple, _snake_case : int, _snake_case : Union[str, Any]=1_3, _snake_case : int=7, _snake_case : Tuple=True, _snake_case : Union[str, Any]=True, _snake_case : Dict=True, _snake_case : Optional[Any]=True, _snake_case : Tuple=9_9, _snake_case : Any=6_4, _snake_case : str=3_2, _snake_case : Optional[int]=5, _snake_case : Any=4, _snake_case : str=3_7, _snake_case : List[Any]="gelu", _snake_case : Any=0.1, _snake_case : List[str]=0.1, _snake_case : Union[str, Any]=5_1_2, _snake_case : int=1_6, _snake_case : Tuple=2, _snake_case : Union[str, Any]=0.0_2, _snake_case : str=3, _snake_case : str=4, _snake_case : Optional[int]=None, ) ->List[str]:
snake_case__ : List[Any] = parent
snake_case__ : str = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : Dict = is_training
snake_case__ : int = use_input_mask
snake_case__ : List[Any] = use_token_type_ids
snake_case__ : int = use_labels
snake_case__ : str = vocab_size
snake_case__ : str = hidden_size
snake_case__ : Any = embedding_size
snake_case__ : Any = num_hidden_layers
snake_case__ : int = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Union[str, Any] = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : Union[str, Any] = attention_probs_dropout_prob
snake_case__ : Any = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : List[Any] = type_sequence_label_size
snake_case__ : Optional[Any] = initializer_range
snake_case__ : str = num_labels
snake_case__ : int = num_choices
snake_case__ : Any = scope
def lowercase_ ( self : int ) ->Optional[Any]:
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
snake_case__ : int = None
if self.use_input_mask:
snake_case__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Optional[Any] = None
if self.use_token_type_ids:
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
snake_case__ : Dict = None
snake_case__ : Any = None
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
snake_case__ : Tuple = ids_tensor([self.batch_size], self.num_choices )
snake_case__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : Optional[Any] ) ->List[Any]:
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_snake_case, initializer_range=self.initializer_range, )
def create_and_check_mobilebert_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Run the bare MobileBertModel with/without masks and check output shapes."""
    model = MobileBertModel(config=config)
    model.to(torch_device)
    model.eval()
    # Exercise all three call signatures; only the last result is shape-checked.
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
    result = model(input_ids, token_type_ids=token_type_ids)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_mobilebert_for_masked_lm(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """MaskedLM head: logits must be (batch, seq_len, vocab_size)."""
    model = MobileBertForMaskedLM(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_mobilebert_for_next_sequence_prediction(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """NSP head: binary (is-next / not-next) logits of shape (batch, 2)."""
    model = MobileBertForNextSentencePrediction(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=sequence_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_mobilebert_for_pretraining(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Pretraining head: MLM logits plus NSP (seq-relationship) logits."""
    model = MobileBertForPreTraining(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=token_labels,
        next_sentence_label=sequence_labels,
    )
    self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_mobilebert_for_question_answering(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """QA head: per-token start/end span logits."""
    model = MobileBertForQuestionAnswering(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        start_positions=sequence_labels,
        end_positions=sequence_labels,
    )
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_mobilebert_for_sequence_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Sequence-classification head: logits of shape (batch, num_labels)."""
    config.num_labels = self.num_labels
    model = MobileBertForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_mobilebert_for_token_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Token-classification head: per-token logits of shape (batch, seq, num_labels)."""
    config.num_labels = self.num_labels
    model = MobileBertForTokenClassification(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_mobilebert_for_multiple_choice(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Multiple-choice head: inputs are tiled to (batch, num_choices, seq)."""
    config.num_choices = self.num_choices
    model = MobileBertForMultipleChoice(config=config)
    model.to(torch_device)
    model.eval()
    # Replicate each tensor once per answer choice along a new dim 1.
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids,
        attention_mask=multiple_choice_input_mask,
        token_type_ids=multiple_choice_token_type_ids,
        labels=choice_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape
    expected by the shared ModelTesterMixin machinery."""
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-test harness for MobileBERT.

    NOTE(review): the obfuscated source named every test method identically, so
    only the last one survived at class-creation time; names below are restored
    from the ``self.model_tester.create_and_check_*`` call sites.
    """

    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): this boolean attribute's name was lost in obfuscation; in
    # the upstream MobileBERT test it is `fx_compatible` — confirm.
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy MLM + NSP labels when testing the pretraining model class."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Wrap a nested list of token ids in a long tensor on the test device.

    NOTE(review): the original argument/device names were obfuscated; the
    device is presumably the module-level `torch_device` used throughout
    transformers tests — confirm the import exists at the top of this file.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
# Relative tolerance for the integration test below (referenced as TOLERANCE).
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    """End-to-end check against the released google/mobilebert-uncased weights."""

    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
"""simple docstring"""
def method_a(boundary, steps):
    """Approximate the integral of f over [boundary[0], boundary[1]].

    Uses the extended trapezoidal rule: int(f) = dx/2 * (f1 + 2f2 + ... + fn).
    The original def used the same placeholder name for both parameters (a
    SyntaxError); names are restored from the body and the call in main().
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ... strictly below b-h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    """Integrand: f(x) = x^2 (replace with the function to integrate)."""
    y = (x - 0) * (x - 0)
    return y
def main():
    """Integrate f over [0, 1] with 10 trapezoids and print the estimate."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
"""simple docstring"""
import math
def perfect_square(num: int) -> bool:
    """Return True if ``num`` is a perfect square.

    The original body compared undefined ``num`` against a float sqrt product;
    ``math.isqrt`` avoids both the NameError and floating-point round-off.
    """
    if num < 0:
        return False
    return math.isqrt(num) ** 2 == num
def perfect_square_binary_search(n: int) -> bool:
    """Return True if ``n`` is a perfect square, via binary search on its root.

    Negative inputs fall straight through the loop (left=0 > right) -> False.
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        if mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image-classification pipeline: assigns labels (with scores) to an image.

    Fixes vs. the mangled source: the decorator/base were restored to the
    imported ``PIPELINE_INIT_ARGS``/``Pipeline``; the TF branch's collapsed
    tuple assignment now unpacks into ``scores, ids``; ``idalabel`` was a
    corruption of the config attribute ``id2label``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        # Restrict to image-classification heads for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        """Route the optional ``top_k`` kwarg to the postprocess step."""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        """Classify the image(s) given as path/URL/PIL image (or a list thereof)."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Turn raw logits into the top_k {'score', 'label'} dicts."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
"""simple docstring"""
import functools
from typing import Any
def word_break(string: str, words: list) -> bool:
    """Return True if ``string`` can be segmented into words from ``words``.

    Builds a character trie of the dictionary, then memoized DFS over start
    indices. Raises ValueError on an empty string or an invalid word list.
    (The mangled source reused one placeholder for both parameters — a
    SyntaxError — and passed the arguments themselves as isinstance types.)
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError('the string should be not empty string')

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError('the words should be a list of non-empty strings')

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = 'WORD_KEEPER'  # marks "a word ends at this node"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from manim import *
class __magic_name__ ( lowerCAmelCase_ ):
    # NOTE(review): the class name and its base were mangled; given the
    # `from manim import *` above, the base is presumably manim's `Scene`
    # — confirm against the original source.

    def __magic_name__ ( self ) -> Union[str, Any]:
        '''Animate a checkpoint's shards being loaded into CPU memory slots.

        NOTE(review): every `__snake_case` argument below is an obfuscation
        placeholder; in the original these were manim direction/colour
        constants (RIGHT/DOWN/YELLOW/...) and previously bound locals. The
        structure is authoritative, the argument values are not — the scene
        cannot render until they are restored.
        '''
        # One memory "slot" rectangle and the slightly smaller fill overlay.
        __a =Rectangle(height=0.5 , width=0.5 )
        __a =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU bank: two columns of six slots, labelled "CPU".
        __a =[mem.copy() for i in range(6 )]
        __a =[mem.copy() for i in range(6 )]
        __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
        __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
        __a =VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
        __a =Text('CPU' , font_size=24 )
        __a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__snake_case )
        # GPU bank: four slots, labelled "GPU".
        __a =[mem.copy() for i in range(4 )]
        __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
        __a =Text('GPU' , font_size=24 )
        __a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
        gpu.move_to([-1, -1, 0] )
        self.add(__snake_case )
        # Empty model: six slots, labelled "Model".
        __a =[mem.copy() for i in range(6 )]
        __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
        __a =Text('Model' , font_size=24 )
        __a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
        model.move_to([3, -1.0, 0] )
        self.add(__snake_case )
        # Small target rectangles marking where each model slot maps into CPU memory.
        __a =[]
        for i, rect in enumerate(__snake_case ):
            rect.set_stroke(__snake_case )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            __a =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=__snake_case , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=__snake_case , buff=0.0 )
            self.add(__snake_case )
            cpu_targs.append(__snake_case )
        # Loaded checkpoint: six slots plus its label, above the model.
        __a =[mem.copy() for i in range(6 )]
        __a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
        __a =Text('Loaded Checkpoint' , font_size=24 )
        __a =Group(__snake_case , __snake_case ).arrange(__snake_case , aligned_edge=__snake_case , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        # Legend explaining the colour coding.
        __a =Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __a =MarkupText(
            f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__snake_case , __snake_case )
        __a =MarkupText(
            f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
        blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        __a =MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__snake_case ) , Write(__snake_case ) )
        self.play(Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
        # Phase 1: grow each shard in place; phase 2: move a copy to its CPU slot.
        __a =[]
        __a =[]
        for i, rect in enumerate(__snake_case ):
            __a =fill.copy().set_fill(__snake_case , opacity=0.7 )
            target.move_to(__snake_case )
            first_animations.append(GrowFromCenter(__snake_case , run_time=1 ) )
            __a =target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
        self.play(*__snake_case )
        self.play(*__snake_case )
        self.wait()
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    """Return True if ``series`` has a constant difference between neighbours.

    Raises ValueError when the input is not a list or is empty. (The mangled
    source passed the argument as its own isinstance type — a TypeError.)
    """
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    Raises ValueError when the input is not a list or is empty.
    """
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffolding for the BLIP model family: submodules are only
# imported on first attribute access (or eagerly under TYPE_CHECKING), so that
# `import transformers` stays cheap when torch / TF / vision extras are absent.
#
# NOTE(review): obfuscation collapsed all names to `UpperCAmelCase__`; in the
# standard transformers layout the dict below is `_import_structure` and the
# later bare assignments are `_import_structure[...] = [...]` updates — the
# rebinding as written would discard earlier entries. Confirm against upstream.
UpperCAmelCase__ = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

# Image processor requires the vision extra (PIL).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = ["BlipImageProcessor"]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

# Under static type checking, perform the real imports so IDEs/mypy see them.
if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

# At runtime, replace this module with a lazy proxy.
else:
    import sys

    UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Per-device batch-size defaults used by the GLUE/MRPC training example below.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build GLUE/MRPC train+eval DataLoaders tokenized for ``model_name``.

    The mangled def reused one placeholder for all three parameters (a
    SyntaxError) and bound every local to the same name; names are restored
    from the body's reads (``tokenizer``, ``datasets``, ...).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train + evaluate a sequence classifier on MRPC under Accelerate/DeepSpeed.

    ``config`` supplies lr / num_epochs / seed / batch_size; ``args`` supplies
    model_name_or_path, output_dir and an optional performance_lower_bound
    that is asserted against the best eval accuracy. Writes per-epoch accuracy
    to ``<output_dir>/all_results.json`` on the main process.
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: DeepSpeed may own the optimizer via its config.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (Dummy when DeepSpeed's config supplies one).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels'])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    """Parse CLI flags and launch training_function with default GLUE config."""
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='bert-base-cased',
        help='Path to pretrained model or model identifier from huggingface.co/models.',
        required=False,
    )
    parser.add_argument(
        '--output_dir',
        type=str,
        default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.',
    )
    parser.add_argument(
        '--performance_lower_bound',
        type=float,
        default=None,
        help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.',
    )
    parser.add_argument(
        '--num_epochs',
        type=int,
        default=3,
        help='Number of train epochs.',
    )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
A__: List[str] = logging.getLogger()
def get_results(output_dir):
    """Load ``all_results.json`` from ``output_dir``.

    Raises ValueError when the file does not exist. (The mangled source bound
    every local to ``_a`` while reading ``path``/``results`` — NameErrors.)
    """
    results = {}
    path = os.path.join(output_dir, """all_results.json""")
    if os.path.exists(path):
        with open(path, """r""") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
# Mirror logger output to stdout so test runners capture it. (The original
# assigned the handler to a mangled name while adding `stream_handler`.)
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    """Smoke tests that launch example scripts through xla_spawn on 8 TPU cores.

    Restored from the mangled source: ``patch.object``'s target/value were the
    undefined placeholder ``SCREAMING_SNAKE_CASE``; upstream patches
    ``sys.argv`` with the built ``testargs``.
    """

    def test_run_glue_with_xla_spawn(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["""eval_accuracy"""], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 276 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    """Dummy config used by the auto-class registration tests below.

    The ``model_type`` string is what gets registered with ``AutoConfig``.
    """

    model_type = "new-model"


# Backwards-compatible alias for the previous auto-generated class name.
lowerCAmelCase__ = NewModelConfig
if is_tf_available():

    class TFNewModel(TFBertModel):
        """Dummy TF model paired with ``NewModelConfig`` for the registration tests."""

        # NOTE(review): the attribute name was auto-mangled; for a TF model the
        # field pointing at its configuration class is `config_class` — confirm.
        config_class = NewModelConfig
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
    '''Integration tests for the TF auto-model factory classes (TFAutoModel,
    TFAutoModelFor*): checkpoint loading, config/model pairing, registration of
    custom model types, hub error messages and checkpoint caching.

    NOTE(review): this block appears machine-renamed — every test method is
    called `A_` (later defs shadow earlier ones, so only the last is
    discoverable at runtime), the name `lowercase` is referenced but never
    defined, and `a, a = ...`-style unpackings bind both results to one name.
    Compare against the upstream test file before trusting these tests.
    '''
    # --- from_pretrained smoke tests for the main auto classes ---
    @slow
    def A_ ( self ):
        _lowerCamelCase : List[str] = 'bert-base-cased'
        _lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(lowercase )
        self.assertIsNotNone(lowercase )
        self.assertIsInstance(lowercase , lowercase )
        _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained(lowercase )
        self.assertIsNotNone(lowercase )
        self.assertIsInstance(lowercase , lowercase )
    @slow
    def A_ ( self ):
        _lowerCamelCase : List[str] = 'bert-base-cased'
        _lowerCamelCase : int = AutoConfig.from_pretrained(lowercase )
        self.assertIsNotNone(lowercase )
        self.assertIsInstance(lowercase , lowercase )
        _lowerCamelCase : int = TFAutoModelForPreTraining.from_pretrained(lowercase )
        self.assertIsNotNone(lowercase )
        self.assertIsInstance(lowercase , lowercase )
    @slow
    def A_ ( self ):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            _lowerCamelCase : int = TFAutoModelForCausalLM.from_pretrained(lowercase )
            _lowerCamelCase, _lowerCamelCase : str = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def A_ ( self ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : List[Any] = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            _lowerCamelCase : str = TFAutoModelWithLMHead.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def A_ ( self ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Tuple = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            _lowerCamelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase )
            _lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def A_ ( self ):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase )
            _lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def A_ ( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            _lowerCamelCase : str = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            _lowerCamelCase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    def A_ ( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            _lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            _lowerCamelCase : List[str] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    @slow
    @require_tensorflow_probability
    def A_ ( self ):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            _lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
            _lowerCamelCase : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase )
            _lowerCamelCase, _lowerCamelCase : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
                lowercase , output_loading_info=lowercase )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    def A_ ( self ):
        _lowerCamelCase : int = TFAutoModelWithLMHead.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , lowercase )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
    def A_ ( self ):
        _lowerCamelCase : Any = TFAutoModelWithLMHead.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , lowercase )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
    def A_ ( self ):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        _lowerCamelCase : List[str] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
        self.assertIsInstance(lowercase , lowercase )
        _lowerCamelCase : Optional[int] = copy.deepcopy(model.config )
        _lowerCamelCase : Dict = ['FunnelBaseModel']
        _lowerCamelCase : List[Any] = TFAutoModel.from_config(lowercase )
        self.assertIsInstance(lowercase , lowercase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(lowercase )
            _lowerCamelCase : Tuple = TFAutoModel.from_pretrained(lowercase )
            self.assertIsInstance(lowercase , lowercase )
    # --- registration of a custom config/model type with every TF auto-class ---
    def A_ ( self ):
        try:
            AutoConfig.register('new-model' , lowercase )
            _lowerCamelCase : Tuple = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(lowercase ):
                        auto_class.register(lowercase , lowercase )
                    auto_class.register(lowercase , lowercase )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(lowercase ):
                        auto_class.register(lowercase , lowercase )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    _lowerCamelCase : Optional[Any] = BertModelTester(self ).get_config()
                    _lowerCamelCase : Dict = NewModelConfig(**tiny_config.to_dict() )
                    _lowerCamelCase : int = auto_class.from_config(lowercase )
                    self.assertIsInstance(lowercase , lowercase )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(lowercase )
                        _lowerCamelCase : List[Any] = auto_class.from_pretrained(lowercase )
                        self.assertIsInstance(lowercase , lowercase )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    # --- error-message tests for invalid identifiers / revisions / missing files ---
    def A_ ( self ):
        with self.assertRaisesRegex(
            lowercase , 'bert-base is not a local folder and is not a valid model identifier' ):
            _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained('bert-base' )
    def A_ ( self ):
        with self.assertRaisesRegex(
            lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            _lowerCamelCase : str = TFAutoModel.from_pretrained(lowercase , revision='aaaaaa' )
    def A_ ( self ):
        with self.assertRaisesRegex(
            lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
            _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
    def A_ ( self ):
        with self.assertRaisesRegex(lowercase , 'Use `from_pt=True` to load this model' ):
            _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
    def A_ ( self ):
        # Make sure we have cached the model.
        _lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
        with RequestCounter() as counter:
            _lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        _lowerCamelCase : int = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
        with RequestCounter() as counter:
            _lowerCamelCase : List[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
'''simple docstring'''
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
return [
a * b * (1000 - a - b)
for a in range(1, 999 )
for b in range(lowercase__, 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under this name above (the
    # function is auto-named `__magic_name__`) — confirm the intended name.
    print(f'''{solution() = }''')
| 56 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Declarative map of submodule -> public names, consumed by _LazyModule below.
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling objects as well.  (The original
    # auto-generated code rebound the whole dict to this list, losing the
    # configuration entries; store it under its own key instead.)
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports only happen on
    # first attribute access (the original code dropped the result on the floor).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
def solution():
    """Project Euler 11: greatest product of four adjacent numbers (right,
    down, or either diagonal) in the 20x20 grid stored as ``grid.txt`` next
    to this file.
    """
    # Bug fix: the auto-generated code passed the function itself to dirname;
    # the grid lives alongside this source file.
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

        maximum = 0

        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp

        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp

        # diagonal 1 (down-right)
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp

        # diagonal 2 (down-left)
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum


# Backwards-compatible alias for the previous auto-generated name.
lowercase__ = solution
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under this name above (the
    # function is auto-named `lowercase__`) — confirm the intended name.
    print(solution())
| 321 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload, sampling_rate):
    """Decode an in-memory audio file with ffmpeg into a float32 numpy waveform.

    Args:
        bpayload: raw bytes of the audio file (any container ffmpeg understands).
        sampling_rate: target sampling rate (Hz) of the mono output.

    Raises:
        ValueError: if ffmpeg is not installed, or the payload decodes to nothing.
    """
    ar = f'{sampling_rate}'
    ac = '1'  # mono
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    # 'f32le' output is little-endian float32 PCM (np.floataa did not exist).
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
def ffmpeg_microphone(sampling_rate, chunk_length_s, format_for_conversion='f32le'):
    """Yield raw microphone audio chunks captured through ffmpeg.

    Args:
        sampling_rate: capture sampling rate in Hz.
        chunk_length_s: length in seconds of each yielded byte chunk.
        format_for_conversion: 's16le' (int16) or 'f32le' (float32) PCM.

    Raises:
        ValueError: for an unknown *format_for_conversion*.
    """
    ar = f'{sampling_rate}'
    ac = '1'  # mono
    if format_for_conversion == 's16le':
        size_of_sample = 2
    elif format_for_conversion == 'f32le':
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')

    # Platform-specific ffmpeg capture backend and default input device.
    system = platform.system()
    if system == 'Linux':
        format_ = 'alsa'
        input_ = 'default'
    elif system == 'Darwin':
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == 'Windows':
        format_ = 'dshow'
        input_ = 'default'

    ffmpeg_command = [
        'ffmpeg',
        '-f',
        format_,
        '-i',
        input_,
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-fflags',
        'nobuffer',
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate, chunk_length_s, stream_chunk_s=None, stride_length_s=None, format_for_conversion='f32le'):
    """Yield striding microphone audio as numpy arrays, for streaming inference.

    Args:
        sampling_rate: capture sampling rate in Hz.
        chunk_length_s: length (s) of each full chunk handed to the consumer.
        stream_chunk_s: optional smaller capture granularity for lower latency.
        stride_length_s: overlap (s) kept on each side; defaults to chunk/6.
        format_for_conversion: 's16le' (int16) or 'f32le' (float32) PCM.

    Yields:
        dicts with ``raw`` (numpy audio), ``stride`` (in samples) and
        ``sampling_rate``; chunks that arrive too late are skipped.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == 's16le':
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == 'f32le':
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'], dtype=dtype)
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len, stride, stream=False):
    """Re-chunk a byte iterator into fixed-size windows with left/right strides.

    Args:
        iterator: iterable of ``bytes``.
        chunk_len: size (in bytes) of each emitted chunk.
        stride: ``(stride_left, stride_right)`` overlap carried between chunks.
        stream: when True, also emit partial chunks as data arrives, flagged
            via ``item["partial"]``.

    Yields:
        dicts with ``raw`` bytes and the effective ``stride`` of that chunk.

    Raises:
        ValueError: if the strides do not leave room for any fresh data.
    """
    acc = b''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}')
    _stride_left = 0  # the very first chunk has no left context
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {'raw': acc[:chunk_len], 'stride': stride, 'partial': True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : int = 2**24 # 16Mo
try:
with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process:
while True:
_lowerCamelCase : Optional[Any] = ffmpeg_process.stdout.read(lowercase__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error | 96 | 0 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for the conversion script (auto-generated variable name).
_a = logging.get_logger()
@dataclass
class Tracker:
    """Record the leaf modules a model executes during one forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        # A "leaf" is a module with no submodules; conv/batchnorm are always kept.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Remove the hooks so subsequent forwards are untouched.
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copy weights from *src* to *dest* by aligning their traced leaf modules."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x):
        """Run *x* through both models and transfer state dicts pairwise.

        Raises:
            Exception: when the two models execute a different number of
                (non-skipped) parametrized operations.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(src_traced) != len(dest_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name, config, save_directory, push_to_hub=True):
    """Convert one timm ResNet checkpoint to a HF ``ResNetForImageClassification``.

    Args:
        name: timm model name (e.g. ``"resnet50"``).
        config: target ``ResNetConfig`` for the converted model.
        save_directory: base ``Path`` used to build the hub repo folder.
        push_to_hub: when True, upload the converted model and image processor.
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """Convert one (or all) supported timm ResNet checkpoints to HF format.

    Args:
        save_directory: ``Path`` under which converted repos are created.
        model_name: a single architecture name, or None to convert all of them.
        push_to_hub: forwarded to :func:`convert_weight_and_push`.

    Returns:
        ``(config, expected_shape)`` for the last converted model.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Every config shares the ImageNet label mapping.
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
    }
    if model_name:
        # Bug fix: bind `config` on this path too so the return below is valid.
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


# Backwards-compatible alias for the previous auto-generated name (the last
# definition of `_A` in the original file resolved to this function).
_A = convert_weights_and_push
if __name__ == "__main__":
    # CLI entry point: pick a model (or all) and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help=(
            'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
            ' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=Path,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        default=True,
        type=bool,
        required=False,
        help='If True, push model and image processor to the hub.',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 17 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both of these were bound to the same auto-generated name,
# so the logger was immediately clobbered by the archive map.
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    """Configuration for the CTRL model.

    Stores vocabulary size, context length, hidden sizes and regularisation
    hyper-parameters; extra keyword arguments are forwarded to
    ``PretrainedConfig``.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)


# Backwards-compatible alias for the previous auto-generated class name.
lowerCAmelCase__ = CTRLConfig
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge with weight 0 or 1 (0-1 BFS graphs)."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Directed graph over vertices ``0..size-1`` supporting 0-1 BFS shortest paths."""

    def __init__(self, size):
        self._graph = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex):
        """Iterate over the outgoing edges of *vertex*."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        # Number of vertices in the graph.
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        """Add a directed edge; only weights 0 and 1 are allowed.

        Raises:
            ValueError: on an invalid weight or out-of-range destination.
        """
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex, finish_vertex):
        """0-1 BFS: weight-0 edges go to the front of the deque, weight-1 to the back.

        Raises:
            ValueError: when *finish_vertex* is unreachable.
        """
        queue = deque([start_vertex])
        distances = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]


# Backwards-compatible alias for the previous auto-generated class name.
UpperCAmelCase__ = AdjacencyList
if __name__ == "__main__":
    # Run the module's doctests when this file is executed directly.
    import doctest
    doctest.testmod()
| 62 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase ):
_lowerCamelCase : Any = data
_lowerCamelCase : Node | None = None
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self ):
_lowerCamelCase : str = None
_lowerCamelCase : str = None
def __iter__( self ):
_lowerCamelCase : List[str] = self.head
while self.head:
yield node.data
_lowerCamelCase : Optional[int] = node.next
if node == self.head:
break
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join(str(lowercase ) for item in iter(self ) )
def A_ ( self , lowercase ):
self.insert_nth(len(self ) , lowercase )
def A_ ( self , lowercase ):
self.insert_nth(0 , lowercase )
def A_ ( self , lowercase , lowercase ):
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
_lowerCamelCase : List[Any] = Node(lowercase )
if self.head is None:
_lowerCamelCase : str = new_node # first node points itself
_lowerCamelCase : Union[str, Any] = new_node
elif index == 0: # insert at head
_lowerCamelCase : List[str] = self.head
_lowerCamelCase : str = new_node
else:
_lowerCamelCase : Union[str, Any] = self.head
for _ in range(index - 1 ):
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : Union[str, Any] = temp.next
_lowerCamelCase : List[str] = new_node
if index == len(self ) - 1: # insert at tail
_lowerCamelCase : Any = new_node
def A_ ( self ):
return self.delete_nth(0 )
def A_ ( self ):
return self.delete_nth(len(self ) - 1 )
def A_ ( self , lowercase = 0 ):
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
_lowerCamelCase : Any = self.head
if self.head == self.tail: # just one node
_lowerCamelCase : List[str] = None
elif index == 0: # delete head node
_lowerCamelCase : List[str] = self.tail.next.next
_lowerCamelCase : Optional[int] = self.head.next
else:
_lowerCamelCase : Dict = self.head
for _ in range(index - 1 ):
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : int = temp.next
_lowerCamelCase : Optional[int] = temp.next.next
if index == len(self ) - 1: # delete at tail
_lowerCamelCase : List[Any] = temp
return delete_node.data
def A_ ( self ):
return len(self ) == 0
def _snake_case ( ):
_lowerCamelCase : Union[str, Any] = CircularLinkedList()
assert len(lowercase__ ) == 0
assert circular_linked_list.is_empty() is True
assert str(lowercase__ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(lowercase__ ) == i
circular_linked_list.insert_nth(lowercase__ , i + 1 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 | 0 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style notes encoder: token + learned position embeddings followed by
    a stack of T5 encoder blocks, a final layer norm and dropout.

    NOTE(review): the original bases were the undefined name ``__snake_case``
    (restored to the mixins imported at the top of the file), the ``__init__``
    parameters were all named ``__SCREAMING_SNAKE_CASE`` (duplicate argument
    names are a SyntaxError), and ``forward`` read ``x`` without ever binding
    it.  Names were reconstructed from how each value is used below.
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        # Token embedding plus a frozen, learned position embedding table.
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        # NOTE(review): the mangled original only had ``... = False`` here;
        # restored to freezing the position table — TODO confirm against the
        # upstream spectrogram notes encoder.
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for _ in range(num_layers):
            self.encoders.append(TaBlock(t5config))
        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode a batch of token ids.

        Returns ``(hidden_states, encoder_inputs_mask)`` — the mask is passed
        through unchanged.  (Method renamed from the mangled ``UpperCamelCase``
        to ``forward`` so ``nn.Module.__call__`` dispatches to it.)
        """
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        input_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(input_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 183 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowercase__ = get_logger(__name__)
class lowerCAmelCase__:
    """Mock download manager that resolves URLs against local "dummy data" zip
    archives instead of the network (mirrors ``datasets``'
    ``MockDownloadManager``).

    NOTE(review): the original block was a SyntaxError — every method was named
    ``A_`` (so later methods silently shadowed earlier ones) and every
    parameter was the duplicated name ``lowercase``.  Method and parameter
    names were reconstructed from the internal references that survived the
    mangling (``self.download_dummy_data``, ``self.dummy_zip_file``,
    ``self.create_dummy_data_dict`` …).
    """

    # Name of the extracted dummy-data directory inside the zip archive.
    dummy_file_name = "dummy_data"
    # Root directory that holds one sub-folder per dataset script.
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded (lazily, on first access of `dummy_file`)
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # Download (and cache) the dummy data lazily on first access.
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        """Fetch the dummy zip (locally or remotely), extract it and return the
        path of the extracted ``dummy_data`` directory."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        """Map ``data_url`` (str, list/tuple or dict of URLs) to paths inside
        the dummy-data directory, preserving the input's structure."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        # Dummy archives are already extracted by `download_dummy_data`.
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield ``(relative_posix_path, binary_file_object)`` pairs for every
        file below ``path``."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        """Yield every regular file under ``paths`` (a path or list of paths),
        skipping hidden/dunder names, in a deterministic order."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
a_ :Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__(CLIPPreTrainedModel):
    """Paint-by-Example image encoder: runs CLIP's vision tower, maps the
    pooled embedding through a small transformer (``PaintByExampleMapper``),
    normalizes it and projects it to ``proj_size``.

    NOTE(review): the original base class was the undefined name
    ``lowerCAmelCase_`` (restored to the imported ``CLIPPreTrainedModel``) and
    ``__init__`` duplicated the parameter name ``_snake_case`` — a
    SyntaxError.  Parameter names reconstructed from usage.
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode images; optionally also return the learned unconditional
        vector.  (Renamed from the mangled ``lowercase_`` to ``forward`` so
        ``nn.Module.__call__`` dispatches here.)"""
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    """Small stack of single-head transformer blocks used to map the pooled
    CLIP embedding.

    NOTE(review): the original was a *second* module-level class named
    ``snake_case__``, shadowing the image encoder above, while the encoder
    instantiates ``PaintByExampleMapper`` — renamed to match that reference.
    The mangled ``__init__`` assigned every intermediate to one name; the
    distinct locals were reconstructed from how each value feeds the blocks.
    """

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1  # single-head attention in each mapper block
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True
                )
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        """Apply each transformer block in sequence.  (Renamed from the
        mangled ``lowercase_`` to ``forward`` for ``nn.Module`` dispatch.)"""
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
| 277 |
"""simple docstring"""
def stooge_sort(arr):
    """Sort ``arr`` in place using stooge sort and return it.

    NOTE(review): both functions here were defined under one mangled name
    ``_snake_case`` (the recursive helper also duplicated its parameter name,
    a SyntaxError) while the bodies and the ``__main__`` block call
    ``stooge``/``stooge_sort`` — names restored, and the broken swap fixed.

    >>> stooge_sort([2, 4, 5, 3, 1])
    [1, 2, 3, 4, 5]
    """
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    """Recursively stooge-sort the slice ``arr[i:h + 1]`` in place."""
    if i >= h:
        return
    # If first element is larger than the last then swap them.
    # (The mangled original assigned the pair to throwaway locals, so the
    # swap never happened and nothing was ever sorted.)
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the slice, sort overlapping thirds.
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge(arr, i, h - t)  # first 2/3
        stooge(arr, i + t, h)  # last 2/3
        stooge(arr, i, h - t)  # first 2/3 again
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    # NOTE(review): the mangled original assigned both inputs to
    # ``lowercase__`` and then read the undefined names
    # ``user_input``/``unsorted`` — restored.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(stooge_sort(unsorted))
"""simple docstring"""
def stooge_sort(arr: list) -> list:
    """In-place stooge sort; returns the same list for convenience.

    NOTE(review): this duplicate copy had both functions named ``__a`` with
    triple-duplicated parameter names (a SyntaxError), a swap that discarded
    its values, and internal calls to the then-undefined ``stooge`` — all
    restored.
    """
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    """Stooge-sort ``arr[i:h + 1]`` in place."""
    # Base case: zero or one element in the slice.
    if i >= h:
        return
    # Put the smaller of the two endpoints first (the mangled original bound
    # the swapped pair to a throwaway local, so the list never changed).
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # More than two elements: recursively sort overlapping two-thirds.
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge(arr, i, h - t)
        stooge(arr, i + t, h)
        stooge(arr, i, h - t)
if __name__ == "__main__":
    # Read a comma-separated list of integers, sort with stooge_sort, print.
    # NOTE(review): the mangled original read the undefined names
    # ``user_input``/``unsorted`` — restored.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(stooge_sort(unsorted))
| 290 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__(ProcessorMixin):
    r"""Processor combining a Blip image processor with two tokenizers (the
    main LLM tokenizer plus a Q-Former tokenizer), mirroring ``transformers``'
    ``InstructBlipProcessor``.

    NOTE(review): the original was a SyntaxError — the base class was the
    undefined name ``lowercase``, every method was named ``A_`` and every
    parameter ``lowercase`` (duplicated).  Names were reconstructed from the
    surviving calls (``super().save_pretrained``, ``self.qformer_tokenizer``,
    ``cls._get_arguments_from_pretrained`` …) and the ``ProcessorMixin``
    contract (``attributes``/``*_class`` class attributes).
    """

    # ProcessorMixin contract: component attribute names and their classes.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images=None,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_token_type_ids=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize ``text`` with both tokenizers and preprocess ``images``
        into one ``BatchFeature``.

        Raises ``ValueError`` when neither images nor text is given.
        """
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            # NOTE(review): the mangled original popped these into throwaway
            # locals; upstream stores them under dedicated Q-Former keys.
            encoding["qformer_input_ids"] = qformer_text_encoding.pop('input_ids')
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop('attention_mask')

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the main tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the main tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # Overridden to also save the Q-Former tokenizer in its own sub-folder.
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # Overridden to also load the Q-Former tokenizer from its sub-folder.
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir,
    model_name,
    bs=8,
    max_source_length=1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs=None,
    prefix="",
    **generate_kwargs,
):
    """Run generation for this rank's shard of the dataset and save a
    ``rank_{local_rank}_output.json`` with ``{"pred", "id"}`` records.

    Returns ``(results, num_replicas)``.  NOTE(review): the original signature
    repeated the parameter name ``__UpperCAmelCase`` fourteen times — a
    SyntaxError; names reconstructed from the keyword call in ``run_generate``
    below and from how each value is used here.
    """
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend='''nccl''', rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).cuda()
    if fpaa:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop('''num_beams''', model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, '''prefix''', '''''') or ''
    if dataset_kwargs is None:
        # Guard: the mangled original unpacked this with ** even when None.
        dataset_kwargs = {}
    ds = SeqaSeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch['''input_ids'''].to(model.device),
            attention_mask=batch['''attention_mask'''].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch['ids']
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({'''pred''': pred, '''id''': ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    """CLI entry point: parse args, run distributed evaluation via
    ``eval_data_dir`` and, on rank 0, gather per-rank results, compute
    BLEU/ROUGE and write generations + metrics to ``--save_dir``.

    NOTE(review): every local in the mangled original was assigned to
    ``UpperCamelCase__`` while later lines read the real names; names were
    reconstructed from those surviving reads (``args``, ``metrics`` …).
    """
    parser = argparse.ArgumentParser(
        epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''')
    parser.add_argument('''--data_dir''', type=str, help='''like cnn_dm/test.source''')
    parser.add_argument(
        '''--model_name''', type=str, help='''like facebook/bart-large-cnn,t5-base, etc.''', default='''sshleifer/distilbart-xsum-12-3''', )
    parser.add_argument('''--save_dir''', type=str, help='''where to save''', default='''tmp_gen''')
    parser.add_argument('''--max_source_length''', type=int, default=None)
    parser.add_argument(
        '''--type_path''', type=str, default='''test''', help='''which subset to evaluate typically train/val/test''')
    parser.add_argument('''--task''', type=str, default='''summarization''', help='''used for task_specific_params + metrics''')
    parser.add_argument('''--bs''', type=int, default=8, required=False, help='''batch size''')
    parser.add_argument(
        '''--local_rank''', type=int, default=-1, required=False, help='''should be passed by distributed.launch''')
    parser.add_argument(
        '''--n_obs''', type=int, default=None, required=False, help='''How many observations. Defaults to all.''')
    parser.add_argument(
        '''--num_return_sequences''', type=int, default=1, required=False, help='''How many sequences to return''')
    parser.add_argument(
        '''--sync_timeout''', type=int, default=600, required=False, help='''How long should master process wait for other processes to finish.''', )
    parser.add_argument('''--src_lang''', type=str, default=None, required=False)
    parser.add_argument('''--tgt_lang''', type=str, default=None, required=False)
    parser.add_argument(
        '''--prefix''', type=str, required=False, default=None, help='''will be added to the begininng of src examples''')
    parser.add_argument('''--fp16''', action='''store_true''')
    parser.add_argument('''--debug''', action='''store_true''')
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + '''_tmp''')
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob('''rank_*.json'''))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fpaa=args.fpaa,
        task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs,
        **generate_kwargs, )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('''pseudolabel_results.json''')
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + '''.target''')
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = 'translation' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = 'bleu' if calc_bleu else 'rouge'
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics['''n_obs'''], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> list:
    """Flatten the per-rank lists of ``{"pred", "id"}`` records, sort them by
    example id and return just the predictions in dataset order.

    NOTE(review): renamed from the mangled duplicate ``lowerCAmelCase_`` —
    ``run_generate`` calls this function as ``combine_partial_results``.
    """
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x['pred'] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout):
    """Poll ``save_dir`` until every rank has written its ``rank_*.json`` file
    (or ``timeout`` seconds elapse) and return the parsed contents.

    Raises ``TimeoutError`` if not all ranks finish in time.  NOTE(review):
    renamed from the mangled duplicate ``lowerCAmelCase_`` to match the call
    in ``run_generate``; parameter names follow the call's argument order
    (``num_replicas, json_save_dir, args.sync_timeout``).
    """
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('''waiting for all nodes to finish''')
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('''rank_*.json'''))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('''Rank 0 gave up on waiting for other processes''')
    # Unreachable
if __name__ == "__main__":
    # Usage for MT:
    # Entry point when launched as a script (the --local_rank argument is
    # expected to be supplied by torch distributed.launch).
    run_generate()
| 201 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
# Configure root logging for the test module and grab the root logger; later
# code attaches a stdout StreamHandler to it.  NOTE(review): the original
# bound the logger to the mangled name ``lowercase__`` while later lines read
# ``logger`` — restored.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    """Load ``all_results.json`` from ``output_dir`` and return it as a dict.

    Raises ``ValueError`` when the file does not exist.  NOTE(review): the
    mangled original named the function ``_snake_case`` while the test below
    calls ``get_results``, and its error message read the undefined local
    ``path`` — both restored.
    """
    results = {}
    path = os.path.join(output_dir, 'all_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
# Mirror log output to stdout so test runners capture it.  NOTE(review): the
# original assigned the handler to the mangled name ``lowercase__`` and then
# read the undefined ``stream_handler`` — restored.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase__(TestCasePlus):
    """End-to-end TPU smoke tests launched through ``xla_spawn``.

    NOTE(review): the base class was the undefined name ``lowercase`` and both
    test methods were named ``A_`` (so the second silently shadowed the first
    and neither was discovered by unittest); ``patch.object``/``get_results``
    were called with undefined mangled names.  Restored so the tests run.
    """

    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
    ./examples/pytorch/text-classification/run_glue.py
    --num_cores=8
    ./examples/pytorch/text-classification/run_glue.py
    --model_name_or_path distilbert-base-uncased
    --output_dir {tmp_dir}
    --overwrite_output_dir
    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
    --do_train
    --do_eval
    --debug tpu_metrics_debug
    --per_device_train_batch_size=2
    --per_device_eval_batch_size=1
    --learning_rate=1e-4
    --max_steps=10
    --warmup_steps=2
    --seed=42
    --max_seq_length=128
    '''.split()
        with patch.object(sys, 'argv', testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = '\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    '.split()
        with patch.object(sys, 'argv', testargs):
            xla_spawn.main()
import json
import os
import torch
from diffusers import UNetaDModel
# Ensure the output directories for the converted checkpoints exist before
# the conversion functions below write into them.
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    """Convert a diffuser temporal-UNet checkpoint to diffusers format.

    Args:
        hor: planning horizon of the checkpoint; only 32 and 128 are handled.
    """
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')

    model = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch')
    state_dict = model.state_dict()
    config = {
        'down_block_types': down_block_types,
        'block_out_channels': block_out_channels,
        'up_block_types': up_block_types,
        'layers_per_block': 1,
        'use_timestep_embedding': True,
        'out_block_type': 'OutConv1DBlock',
        'norm_num_groups': 8,
        'downsample_each_block': False,
        'in_channels': 14,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'sample_size': 65536,
        'mid_block_type': 'MidResTemporalBlock1D',
        'act_fn': 'mish',
    }
    hf_value_function = UNetaDModel(**config)
    print(f'length of state dict: {len(state_dict.keys())}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys())}')

    # Both state dicts enumerate parameters in the same order, so a positional
    # zip yields the old-name -> new-name mapping.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin')
    with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json', 'w') as f:
        json.dump(config, f)
def value_function():
    """Convert the hopper-medium-v2 value-function checkpoint to diffusers format."""
    config = {
        'in_channels': 14,
        'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
        'up_block_types': (),
        'out_block_type': 'ValueFunction',
        'mid_block_type': 'ValueFunctionMidBlock1D',
        'block_out_channels': (32, 64, 128, 256),
        'layers_per_block': 1,
        'downsample_each_block': True,
        'sample_size': 65536,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'use_timestep_embedding': True,
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'norm_num_groups': 8,
        'act_fn': 'mish',
    }

    # The saved file is already a plain state dict, not a module.
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    state_dict = model
    hf_value_function = UNetaDModel(**config)
    print(f'length of state dict: {len(state_dict.keys())}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys())}')

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin')
    with open('hub/hopper-medium-v2/value_function/config.json', 'w') as f:
        json.dump(config, f)
# Convert the horizon-32 UNet and the value function when run as a script.
if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
| 218 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a, input_b):
    """Return the Euclidean distance between two equal-length vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset, value_array):
    """For each query vector in ``value_array`` find its nearest neighbour in ``dataset``.

    Args:
        dataset: 2-D array of candidate vectors.
        value_array: 2-D array of query vectors (same row width and dtype).

    Returns:
        list of ``[nearest_vector_as_list, distance]`` pairs, one per query.

    Raises:
        ValueError: on mismatched dimensionality or row width.
        TypeError: on mismatched shape (late detection) or mismatched dtype.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg)
    except IndexError:
        # 1-D inputs have no shape[1]; keep the original late shape check.
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape')

    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        # Linear scan: start from the first candidate and keep the closest.
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a, input_b):
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
# Run the module doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
# Module-level logger; the safety checker below emits its warnings through it.
# Fix: the class references ``logger``, but the logger was bound to an
# obfuscated name.
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( PreTrainedModel ):
    """CLIP-based safety checker: flags NSFW / watermarked images and blacks them out."""

    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']

    def __init__(self, config):
        super().__init__(config)
        # Shared CLIP vision backbone with two scalar scoring heads on top.
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)  # NSFW score
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)  # watermark score

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        """Score ``images`` and replace flagged ones with black images.

        Returns:
            ``(images, nsfw_detected, watermark_detected)`` — the two flag
            lists contain one bool per image.
        """
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.')

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.')

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 339 |
"""simple docstring"""
import socket
def main():
    """Connect to a local file server on port 12312 and save the stream to ``Received_file``."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            # 1 KiB chunks; an empty recv means the server closed the stream.
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')
# Entry point: download the file when run as a script.
if __name__ == "__main__":
    main()
'''simple docstring'''
import math
def proth(number):
    """Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Args:
        number: 1-based index; must be a positive int.

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` < 1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers arrive in blocks: block ``b`` contributes entries of
        # the form 2**(b+1) + previous entry; compute enough blocks to reach
        # index ``number``.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Print the first Proth numbers; index 0 is expected to raise ValueError.
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 276 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
# BibTeX citation for the SuperGLUE benchmark.
_CITATION = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""

# Short description of the benchmark, injected into the metric docstring.
_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

# Usage/arguments documentation, injected into the metric docstring.
_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    """Fraction of positions where ``preds`` equals ``labels`` (array-likes)."""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    """Accuracy and F1 for a pair of label arrays.

    Args:
        preds: predicted labels.
        labels: gold labels.
        fa_avg: ``average`` mode forwarded to sklearn's F1 score.
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def evaluate_multirc(ids_preds, labels):
    """MultiRC metrics: per-question exact match + macro-F1, and answer-level F1.

    Args:
        ids_preds: dicts with ``idx`` (paragraph/question ids) and ``prediction``.
        labels: gold 0/1 answer labels aligned with ``ids_preds``.
    """
    # Group (prediction, label) pairs by paragraph-question id.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        # Exact match: every answer of the question predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase__ ( datasets.Metric ):
    """SuperGLUE metric suite; behaviour is selected by ``self.config_name``."""

    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types()), codebase_urls=[], reference_urls=[], format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None, )

    def _get_feature_types(self):
        """Feature schema; ``record`` and ``multirc`` use nested structures."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value('int64'),
                        "query": datasets.Value('int64'),
                    },
                    "prediction_text": datasets.Value('string'),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value('int64'),
                        "query": datasets.Value('int64'),
                    },
                    "answers": datasets.Sequence(datasets.Value('string')),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value('int64'),
                        "paragraph": datasets.Value('int64'),
                        "question": datasets.Value('int64'),
                    },
                    "prediction": datasets.Value('int64'),
                },
                "references": datasets.Value('int64'),
            }
        else:
            return {
                "predictions": datasets.Value('int64'),
                "references": datasets.Value('int64'),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg='macro')
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
'''simple docstring'''
def equated_monthly_installments(principal, rate_per_annum, years_to_repay):
    """Return the fixed monthly loan payment (EMI).

    Args:
        principal: amount borrowed; must be > 0.
        rate_per_annum: yearly interest rate as a fraction (e.g. 0.12); >= 0.
        years_to_repay: loan term in whole years; must be an int > 0.

    Raises:
        Exception: on out-of-range arguments (generic type kept for
            backward compatibility with existing callers).
    """
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
# Run the module doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 56 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make CUDA/cuDNN ops deterministic so the image checksums below are reproducible.
enable_full_determinism()
class lowerCAmelCase__ ( PipelineTesterMixin, unittest.TestCase ):
    """Fast CPU tests for ``DDIMPipeline``."""

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the original assigned five class attributes to a single
    # obfuscated name; this last one is presumed to be ``test_cpu_offload`` —
    # confirm against the pipeline test mixin.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Tiny UNet + DDIM scheduler so the pipeline runs in milliseconds."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            # MPS generators are not device-bound; seed the global RNG instead.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests against published DDPM checkpoints."""

    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move ``height`` disks from ``from_pole`` to ``to_pole``."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    """Print a single disk move."""
    print("moving disk from", fp, "to", tp)


def main():
    """Read the tower height from stdin and solve the puzzle."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
| 321 |
"""simple docstring"""
# Imports
import numpy as np
class lowerCAmelCase__ :
    """Spectral vegetation-index calculator over per-band numpy arrays.

    Bands (each optional; an index only needs the bands it references):
    ``red``, ``green``, ``blue``, ``red_edge`` (stored as ``redEdge``) and
    ``nir``. Method names were restored from the ``funcs`` dispatch table,
    whose entries appear in the same order as the method definitions.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Update any subset of the stored band matrices; always returns True."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Compute the index named ``index``; returns False for unknown names."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            'ARVI2': self.arvaa,
            'CCCI': self.ccci,
            'CVI': self.cvi,
            'GLI': self.gli,
            'NDVI': self.ndvi,
            'BNDVI': self.bndvi,
            'redEdgeNDVI': self.red_edge_ndvi,
            'GNDVI': self.gndvi,
            'GBNDVI': self.gbndvi,
            'GRNDVI': self.grndvi,
            'RBNDVI': self.rbndvi,
            'PNDVI': self.pndvi,
            'ATSAVI': self.atsavi,
            'BWDRVI': self.bwdrvi,
            'CIgreen': self.ci_green,
            'CIrededge': self.ci_rededge,
            'CI': self.ci,
            'CTVI': self.ctvi,
            'GDVI': self.gdvi,
            'EVI': self.evi,
            'GEMI': self.gemi,
            'GOSAVI': self.gosavi,
            'GSAVI': self.gsavi,
            'Hue': self.hue,
            'IVI': self.ivi,
            'IPVI': self.ipvi,
            'I': self.i,
            'RVI': self.rvi,
            'MRVI': self.mrvi,
            'MSAVI': self.m_savi,
            'NormG': self.norm_g,
            'NormNIR': self.norm_nir,
            'NormR': self.norm_r,
            'NGRDI': self.ngrdi,
            'RI': self.ri,
            'S': self.s,
            'IF': self._if,
            'DVI': self.dvi,
            'TVI': self.tvi,
            'NDRE': self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print('Index not in the list!')
            return False

    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
_a = logging.getLogger(__name__)
_a = 50 # max width of layer names
_a = 70 # max width of quantizer names
def _A ( UpperCamelCase_ : int) -> Dict:
'''simple docstring'''
__lowercase = parser.add_argument_group("quant_trainer arguments")
group.add_argument("--wprec", type=lowercase__, default=8, help="weight precision")
group.add_argument("--aprec", type=lowercase__, default=8, help="activation precision")
group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
group.add_argument("--quant-disable-keyword", type=lowercase__, nargs="+", help="disable quantizers by keyword")
group.add_argument("--quant-disable-layer-module", type=lowercase__, help="disable quantizers by keyword under layer.")
group.add_argument("--quant-enable-layer-module", type=lowercase__, help="enable quantizers by keyword under layer")
group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
group.add_argument("--percentile", default=lowercase__, type=lowercase__, help="percentile for PercentileCalibrator")
group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
group.add_argument("--clip-gelu", metavar="N", type=lowercase__, help="clip gelu output maximum value to N")
group.add_argument(
"--recalibrate-weights", action="store_true", help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
), )
def _A ( UpperCamelCase_ : int) -> Union[str, Any]:
'''simple docstring'''
if args.calibrator == "max":
__lowercase = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator")
__lowercase = 'histogram'
elif args.calibrator == "mse":
__lowercase = 'histogram'
else:
raise ValueError(F"""Invalid calibrator {args.calibrator}""")
__lowercase = QuantDescriptor(num_bits=args.aprec, calib_method=lowercase__)
__lowercase = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
quant_nn.QuantLinear.set_default_quant_desc_input(lowercase__)
quant_nn.QuantLinear.set_default_quant_desc_weight(lowercase__)
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : Any=False, UpperCamelCase_ : Optional[int]=False) -> int:
'''simple docstring'''
logger.info("Configuring Model for Quantization")
logger.info(F"""using quantization package {pytorch_quantization.__file__}""")
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowercase__, ["embeddings"], which="weight", _disabled=lowercase__)
if args.quant_disable:
set_quantizer_by_name(lowercase__, [""], _disabled=lowercase__)
if args.quant_disable_keyword:
set_quantizer_by_name(lowercase__, args.quant_disable_keyword, _disabled=lowercase__)
if args.quant_disable_layer_module:
set_quantizer_by_name(lowercase__, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=lowercase__)
if args.quant_enable_layer_module:
set_quantizer_by_name(lowercase__, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=lowercase__)
if args.recalibrate_weights:
recalibrate_weights(lowercase__)
if args.fuse_qkv:
fuse_qkv(lowercase__, lowercase__)
if args.clip_gelu:
clip_gelu(lowercase__, args.clip_gelu)
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowercase__)
def _A ( UpperCamelCase_ : Any) -> Any:
'''simple docstring'''
logger.info("Enabling Calibration")
for name, module in model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"""{name:80}: {module}""")
def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : Union[str, Any]) -> Any:
'''simple docstring'''
logger.info("Loading calibrated amax")
for name, module in model.named_modules():
if name.endswith("_quantizer"):
if module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator):
module.load_calib_amax()
else:
module.load_calib_amax("percentile", percentile=args.percentile)
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowercase__)
def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : List[str]) -> Union[str, Any]:
    '''Share a single amax across the Q/K/V quantizers of every self-attention module.

    NOTE(review): duplicate parameter names (a SyntaxError) and locals assigned
    to throwaway ``__lowercase`` but read back as ``q``/``k``/``v``/``amax`` —
    machine-mangled identifiers.
    '''
    def fusea(UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : str):
        # Bail out if any of the three quantizers has no amax buffer yet.
        for mod in [qq, qk, qv]:
            if not hasattr(lowercase__, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        __lowercase = qq._amax.detach().item()
        __lowercase = qk._amax.detach().item()
        __lowercase = qv._amax.detach().item()
        # Use the max of the three so all share one quantization range.
        __lowercase = max(lowercase__, lowercase__, lowercase__)
        qq._amax.fill_(lowercase__)
        qk._amax.fill_(lowercase__)
        qv._amax.fill_(lowercase__)
        logger.info(F"""          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""")
    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(F"""FUSE_QKV: {name:{name_width}}""")
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict) -> str:
    '''Clamp the input-quantizer amax of every non-attention output projection.

    NOTE(review): duplicate parameter names (a SyntaxError); the clamp bound
    and logged values read free names — machine-mangled identifiers.
    '''
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            __lowercase = mod._input_quantizer._amax.data.detach().item()
            # In-place clamp of the quantization range.
            mod._input_quantizer._amax.data.detach().clamp_(max=lowercase__)
            __lowercase = mod._input_quantizer._amax.data.detach().item()
            logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""")
def _A ( UpperCamelCase_ : List[Any]) -> Optional[int]:
    '''Expand per-tensor weight amax buffers to per-channel shape for axis-quantized weights.

    NOTE(review): the body reads the free name ``model`` and ``amax`` is read
    before it is ever bound — machine-mangled identifiers.
    '''
    for name, mod in model.named_modules():
        if hasattr(lowercase__, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            __lowercase = mod.weight.shape[0]
            __lowercase = mod._weight_quantizer._amax.detach()
            # Broadcast the scalar amax to one entry per output channel.
            __lowercase = torch.ones(lowercase__, dtype=amax.dtype, device=amax.device) * amax
            print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""")
def _A ( UpperCamelCase_ : str) -> List[str]:
    '''Recompute each module's per-channel weight amax directly from the current weights.

    Fix over the original block: the "NO AMAX BUFFER" warning was a plain
    string, so ``{name:{name_width}}`` was printed literally — it is now an
    f-string.

    NOTE(review): the rest of the body still reads free names (``model``,
    ``name_width``, ``lowercase__``) — machine-mangled identifiers that need
    restoring before this can run.
    '''
    for name, mod in model.named_modules():
        if hasattr(lowercase__, "_weight_quantizer"):
            # ``weight_quantizer`` presumably exposes ``_weight_quantizer`` — TODO confirm.
            if not hasattr(mod.weight_quantizer, "_amax"):
                print(f"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            __lowercase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            __lowercase = set(range(len(mod.weight.size()))) - axis_set
            __lowercase = pytorch_quantization.utils.reduce_amax(mod.weight, axis=lowercase__, keepdims=lowercase__).detach()
            logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""")
            __lowercase = amax
def _A ( UpperCamelCase_ : Any, UpperCamelCase_ : Union[str, Any]=25, UpperCamelCase_ : str=180, UpperCamelCase_ : Optional[int]=None) -> Optional[int]:
    '''Log a one-line input/weight quantizer summary for every weighted module.

    NOTE(review): duplicate parameter names (a SyntaxError); the name-width
    computation assigns to a throwaway local yet ``name_width`` is read later
    — machine-mangled identifiers.
    '''
    if ignore is None:
        __lowercase = []
    elif not isinstance(lowercase__, lowercase__):
        __lowercase = [ignore]
    __lowercase = 0
    # First pass: find the longest module name for column alignment.
    for name, mod in model.named_modules():
        if not hasattr(lowercase__, "weight"):
            continue
        __lowercase = max(lowercase__, len(lowercase__))
    for name, mod in model.named_modules():
        __lowercase = getattr(lowercase__, "_input_quantizer", lowercase__)
        __lowercase = getattr(lowercase__, "_weight_quantizer", lowercase__)
        if not hasattr(lowercase__, "weight"):
            continue
        # Skip ignored module types and ignored name substrings.
        if type(lowercase__) in ignore:
            continue
        if [True for s in ignore if type(lowercase__) is str and s in name]:
            continue
        __lowercase = F"""Act:{input_q.extra_repr()}"""
        __lowercase = F"""Wgt:{weight_q.extra_repr()}"""
        __lowercase = F"""{name:{name_width}} {act_str} {wgt_str}"""
        if len(lowercase__) <= line_width:
            logger.info(lowercase__)
        else:
            # Too long for one line: split act/weight onto two aligned lines.
            logger.info(F"""{name:{name_width}} {act_str}""")
            logger.info(F"""{" ":{name_width}} {wgt_str}""")
def _A ( UpperCamelCase_ : Union[str, Any]) -> Optional[int]:
    '''Print every TensorQuantizer in the model followed by the total count.

    NOTE(review): the body reads the free names ``model`` and ``count``
    (initialised into a throwaway local) — machine-mangled identifiers.
    '''
    __lowercase = 0
    for name, mod in model.named_modules():
        if isinstance(lowercase__, pytorch_quantization.nn.TensorQuantizer):
            print(F"""{name:80} {mod}""")
            count += 1
    print(F"""{count} TensorQuantizers found in model""")
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Optional[Any], UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> List[Any]:
    '''Set one attribute on a module's named quantizer, warning if it is absent.

    NOTE(review): five identical parameter names (a SyntaxError) and free-name
    reads (``quantizer_mod``, ``name``, ``quantizer``) — machine-mangled.
    '''
    __lowercase = getattr(lowercase__, lowercase__, lowercase__)
    if quantizer_mod is not None:
        # The attribute must already exist on the quantizer before overriding it.
        assert hasattr(lowercase__, lowercase__)
        setattr(lowercase__, lowercase__, lowercase__)
    else:
        logger.warning(F"""{name} has no {quantizer}""")
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Dict="both", **UpperCamelCase_ : Optional[Any]) -> Tuple:
    '''Apply keyword settings to a module's input and/or weight quantizer.

    ``which`` selects "input", "weight" or "both".

    NOTE(review): duplicate parameter names (a SyntaxError) and free-name
    reads (``which``, ``name``, ``kwargs``, ``s``) — machine-mangled.
    '''
    __lowercase = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
    for k, v in kwargs.items():
        s += F""" {k}={v}"""
    if which in ["input", "both"]:
        set_quantizer(lowercase__, lowercase__, "_input_quantizer", lowercase__, lowercase__)
    if which in ["weight", "both"]:
        set_quantizer(lowercase__, lowercase__, "_weight_quantizer", lowercase__, lowercase__)
    logger.info(lowercase__)
def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : str, **UpperCamelCase_ : Tuple) -> str:
    '''Apply quantizer settings to every module whose name matches one of ``names`` (regexes).

    Quantized layers route through ``set_quantizers``; bare ``*_quantizer``
    modules get the attributes set directly.

    NOTE(review): duplicate parameter names (a SyntaxError) and free-name
    reads throughout — machine-mangled identifiers.
    '''
    for name, mod in model.named_modules():
        if hasattr(lowercase__, "_input_quantizer") or hasattr(lowercase__, "_weight_quantizer"):
            for n in names:
                if re.search(lowercase__, lowercase__):
                    set_quantizers(lowercase__, lowercase__, **lowercase__)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(lowercase__, lowercase__):
                    __lowercase = F"""Warning: changing {name:{name_width}}"""
                    for k, v in kwargs.items():
                        s += F""" {k}={v}"""
                    setattr(lowercase__, lowercase__, lowercase__)
                    logger.info(lowercase__)
| 17 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( CLIPPreTrainedModel ):
    '''CLIP-based image encoder for Paint-by-Example.

    Encodes ``pixel_values`` through a CLIP vision tower, maps the pooled
    output through a small transformer, normalises and projects it to
    ``proj_size``; also owns a learned unconditional embedding.

    Fixes over the original block: the base class is the imported
    ``CLIPPreTrainedModel`` (the old base name was unresolved); the duplicated
    parameter names in both methods (a SyntaxError) were given distinct names;
    and instance state is stored on ``self`` instead of throwaway locals.
    '''

    def __init__( self , config , proj_size=768 ):
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )

    def A_ ( self , pixel_values , return_uncond_vector=False ):
        '''Encode images to projected latents; optionally also return the uncond vector.'''
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        # Insert a sequence dimension before mapping: (B, H) -> (B, 1, H).
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class lowerCAmelCase__ ( nn.Module ):
    '''Small transformer stack that maps CLIP pooled latents (Paint-by-Example mapper).

    Fixes over the original block: instance state is stored on ``self`` (it was
    assigned to throwaway locals), the forward pass threads ``hidden_states``
    through the blocks (the old return name was unresolved), and the trailing
    non-Python residue on the last line was removed.  Block-construction
    arguments are reconstructed to ``BasicTransformerBlock``'s
    (dim, heads, head_dim) signature — review against the original intent.
    '''

    def __init__( self , lowercase ):
        # ``lowercase`` is the (CLIP vision) config object.
        super().__init__()
        num_layers = (lowercase.num_hidden_layers + 1) // 5
        hid_size = lowercase.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn='gelu' , attention_bias=True )
                for _ in range(num_layers )
            ] )

    def A_ ( self , lowercase ):
        '''Apply each transformer block in sequence to ``lowercase``.'''
        hidden_states = lowercase
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states


# Restored public name so the encoder's ``PaintByExampleMapper(config)`` call resolves.
PaintByExampleMapper = lowerCAmelCase__
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
    """Unit tests for ``VQModel`` (tester mixins plus two pretrained smoke tests).

    NOTE(review): identifiers look machine-mangled — every value is assigned
    to ``__UpperCamelCase`` and read back under its original name
    (``image``, ``batch_size``, ``model`` …), and the mixin bases are the
    unresolved placeholder ``A_``.  Restore real names before running.
    """
    # Model class under test and the name of its forward output.
    UpperCAmelCase__ : Dict = VQModel
    UpperCAmelCase__ : Union[str, Any] = "sample"
    @property
    def _a ( self , A_=(32, 32) ) -> Dict:
        # Random pixel batch used as the model input.
        __UpperCamelCase =4
        __UpperCamelCase =3
        __UpperCamelCase =floats_tensor((batch_size, num_channels) + sizes ).to(A_ )
        return {"sample": image}
    @property
    def _a ( self ) -> List[str]:
        # Expected input shape (C, H, W).
        return (3, 32, 32)
    @property
    def _a ( self ) -> int:
        # Expected output shape (C, H, W).
        return (3, 32, 32)
    def _a ( self ) -> List[Any]:
        # Minimal VQModel configuration + matching dummy inputs.
        __UpperCamelCase ={
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 3,
        }
        __UpperCamelCase =self.dummy_input
        return init_dict, inputs_dict
    def _a ( self ) -> List[str]:
        pass
    def _a ( self ) -> Union[str, Any]:
        pass
    def _a ( self ) -> Union[str, Any]:
        # Smoke test: load a tiny pretrained checkpoint and run a forward pass.
        __UpperCamelCase =VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=A_ )
        self.assertIsNotNone(A_ )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(A_ )
        __UpperCamelCase =model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def _a ( self ) -> Dict:
        # Regression test: deterministic output slice against golden values.
        __UpperCamelCase =VQModel.from_pretrained('fusing/vqgan-dummy' )
        model.to(A_ ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        __UpperCamelCase =torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        __UpperCamelCase =image.to(A_ )
        with torch.no_grad():
            __UpperCamelCase =model(A_ ).sample
        __UpperCamelCase =output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        __UpperCamelCase =torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) )
| 62 |
"""simple docstring"""
lowercase__ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
lowercase__ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : List[Any] = from_type.lower().strip('s' )
_lowerCamelCase : List[Any] = to_type.lower().strip('s' )
_lowerCamelCase : Optional[int] = UNIT_SYMBOL.get(lowercase__ , lowercase__ )
_lowerCamelCase : Any = UNIT_SYMBOL.get(lowercase__ , lowercase__ )
if from_sanitized not in METRIC_CONVERSION:
_lowerCamelCase : Tuple = (
f'''Invalid \'from_type\' value: {from_type!r}.\n'''
f'''Conversion abbreviations are: {', '.join(lowercase__ )}'''
)
raise ValueError(lowercase__ )
if to_sanitized not in METRIC_CONVERSION:
_lowerCamelCase : Any = (
f'''Invalid \'to_type\' value: {to_type!r}.\n'''
f'''Conversion abbreviations are: {', '.join(lowercase__ )}'''
)
raise ValueError(lowercase__ )
_lowerCamelCase : List[Any] = METRIC_CONVERSION[from_sanitized]
_lowerCamelCase : int = METRIC_CONVERSION[to_sanitized]
_lowerCamelCase : List[str] = 1
if from_exponent > to_exponent:
_lowerCamelCase : List[str] = from_exponent - to_exponent
else:
_lowerCamelCase : List[Any] = -(to_exponent - from_exponent)
return value * pow(10 , lowercase__ )
if __name__ == "__main__":
from doctest import testmod
testmod() | 96 | 0 |
"""simple docstring"""
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class a ( __snake_case ):
    """Minimal iterable dataset wrapper used by the even-batches tests.

    NOTE(review): ``__init__`` assigns the free name ``data`` to a throwaway
    local instead of ``self.data`` — machine-mangled identifiers.
    """
    def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple:
        lowerCamelCase_ = data
    def __iter__( self : Optional[Any] ) -> int:
        # Yield the wrapped elements one by one.
        for element in self.data:
            yield element
def lowerCamelCase__ ( _lowerCamelCase : int=True ) -> Optional[Any]:
    """Create a two-process ``Accelerator`` (optionally toggling ``even_batches``).

    NOTE(review): ``even_batches=lowercase__`` and the returned ``accelerator``
    are unresolved names — machine-mangled identifiers.
    """
    lowerCamelCase_ = Accelerator(even_batches=lowercase__ )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def lowerCamelCase__ ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] = False ) -> str:
    """Build a (map-style or iterable) range dataset and prepare its DataLoader.

    NOTE(review): duplicate parameter names (a SyntaxError) plus unresolved
    reads (``iterable``, ``accelerator``, ``dl``) — machine-mangled.
    """
    if iterable:
        lowerCamelCase_ = DummyIterableDataset(torch.as_tensor(range(lowercase__ ) ) )
    else:
        lowerCamelCase_ = TensorDataset(torch.as_tensor(range(lowercase__ ) ) )
    lowerCamelCase_ = DataLoader(lowercase__ , batch_size=lowercase__ )
    lowerCamelCase_ = accelerator.prepare(lowercase__ )
    return dl
def lowerCamelCase__ ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : Dict , ) -> Optional[Any]:
    """Assert that each process observes the expected per-batch sizes.

    NOTE(review): duplicate parameter names (a SyntaxError); mangled identifiers.
    """
    lowerCamelCase_ = create_dataloader(accelerator=lowercase__ , dataset_size=lowercase__ , batch_size=lowercase__ )
    lowerCamelCase_ = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def lowerCamelCase__ ( ) -> Optional[int]:
    """With even_batches (the default) both processes must see identical batch sizes."""
    lowerCamelCase_ = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        lowercase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        lowercase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def lowerCamelCase__ ( ) -> Optional[Any]:
    """With even_batches disabled the last process may receive fewer/smaller batches."""
    lowerCamelCase_ = create_accelerator(even_batches=lowercase__ )
    verify_dataloader_batch_sizes(
        lowercase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        lowercase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def lowerCamelCase__ ( ) -> Union[str, Any]:
    """join_uneven_inputs lets processes run different numbers of batches without hanging.

    NOTE(review): locals are machine-mangled (assigned to ``lowerCamelCase_``
    but read back as ``accelerator``/``ddp_model``/``output``/``loss`` …).
    """
    lowerCamelCase_ = create_accelerator(even_batches=lowercase__ )
    lowerCamelCase_ = torch.nn.Linear(1 , 1 )
    lowerCamelCase_ = accelerator.prepare(lowercase__ )
    lowerCamelCase_ = create_dataloader(lowercase__ , dataset_size=3 , batch_size=1 )
    lowerCamelCase_ = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(lowercase__ ):
            lowerCamelCase_ = ddp_model(batch[0].float() )
            lowerCamelCase_ = output.sum()
            loss.backward()
            batch_idxs.append(lowercase__ )
    accelerator.wait_for_everyone()
    # Process 0 sees both batches, process 1 only the first.
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def lowerCamelCase__ ( _lowerCamelCase : Optional[Any] ) -> List[str]:
    """Joining under a non-DDP distributed setup should emit a warning.

    NOTE(review): ``accelerator`` and the expected warning category are
    unresolved names — machine-mangled identifiers.
    """
    with warnings.catch_warnings(record=lowercase__ ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
        assert issubclass(w[-1].category , lowercase__ )
        assert "only supported for multi-GPU" in str(w[-1].message )
def lowerCamelCase__ ( ) -> int:
    """even_batches can be overridden for the duration of a join context and is restored after."""
    lowerCamelCase_ = True
    lowerCamelCase_ = False
    lowerCamelCase_ = create_accelerator(even_batches=lowercase__ )
    lowerCamelCase_ = torch.nn.Linear(1 , 1 )
    lowerCamelCase_ = accelerator.prepare(lowercase__ )
    lowerCamelCase_ = create_dataloader(lowercase__ , dataset_size=3 , batch_size=1 )
    lowerCamelCase_ = create_dataloader(lowercase__ , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowercase__ ):
        lowerCamelCase_ = train_dl.batch_sampler.even_batches
        lowerCamelCase_ = valid_dl.batch_sampler.even_batches
    # Inside the context both loaders carry the override; afterwards the default.
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def lowerCamelCase__ ( ) -> str:
    """The even_batches override applies to map-style dataloaders and skips iterable ones."""
    lowerCamelCase_ = True
    lowerCamelCase_ = False
    lowerCamelCase_ = create_accelerator(even_batches=lowercase__ )
    lowerCamelCase_ = torch.nn.Linear(1 , 1 )
    lowerCamelCase_ = accelerator.prepare(lowercase__ )
    create_dataloader(lowercase__ , dataset_size=3 , batch_size=1 , iterable=lowercase__ )
    lowerCamelCase_ = create_dataloader(lowercase__ , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore' )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowercase__ ):
                lowerCamelCase_ = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def lowerCamelCase__ ( ) -> Optional[int]:
    """Overriding even_batches while only iterable dataloaders exist warns the user."""
    lowerCamelCase_ = create_accelerator()
    lowerCamelCase_ = torch.nn.Linear(1 , 1 )
    lowerCamelCase_ = accelerator.prepare(lowercase__ )
    create_dataloader(lowercase__ , dataset_size=3 , batch_size=1 , iterable=lowercase__ )
    with warnings.catch_warnings(record=lowercase__ ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowercase__ ):
            pass
        assert issubclass(w[-1].category , lowercase__ )
        assert "only supported for map-style datasets" in str(w[-1].message )
def lowerCamelCase__ ( ) -> Union[str, Any]:
    """Entry point: run every even_batches/join test in sequence.

    NOTE(review): every ``test_*`` function it calls (and ``main`` in the
    guard below) is unresolved — the definitions above are all named
    ``lowerCamelCase__`` due to machine-mangling.
    """
    lowerCamelCase_ = create_accelerator()
    accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
    test_default_ensures_even_batch_sizes()
    accelerator.print('Run tests with even_batches disabled' )
    test_can_disable_even_batches()
    accelerator.print('Test joining uneven inputs' )
    test_can_join_uneven_inputs()
    accelerator.print('Test overriding even_batches when joining uneven inputs' )
    test_join_can_override_even_batches()
    accelerator.print('Test overriding even_batches for mixed dataloader types' )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print('Test join with non DDP distributed raises warning' )
    lowerCamelCase_ = accelerator.state.distributed_type
    # Temporarily fake FSDP to exercise the non-DDP warning path, then restore.
    lowerCamelCase_ = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(lowercase__ )
    lowerCamelCase_ = original_state
if __name__ == "__main__":
    main()
| 183 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module scaffolding for ViT-MSN (standard HF ``__init__`` pattern).
# Fixes over the original block: the import structure, the modeling list and
# the _LazyModule were all assigned to the same mangled name (each overwrote
# the previous), and ``_import_structure`` was read but never bound.  The
# structure is restored: one dict, extended when torch is available, handed
# to ``_LazyModule`` in the non-TYPE_CHECKING branch.
_import_structure = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes lazily.
    _import_structure["""modeling_vit_msn"""] = [
        """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ViTMSNModel""",
        """ViTMSNForImageClassification""",
        """ViTMSNPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def lowercase_ (n : int ) -> int:
    """Return the number of positive divisors of ``n`` (``n >= 1``).

    Trial-divides up to sqrt(n): each prime factor with multiplicity ``m``
    contributes a factor ``m + 1`` to the divisor count.

    Fixes over the original block: the parameter was named ``A`` while the
    body read ``n`` (a NameError), and the locals were assigned to a single
    throwaway name yet read back as ``n_divisors``/``multiplicity``.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    # Whatever remains above sqrt of the original n is a prime factor (mult. 1).
    if n > 1:
        n_divisors *= 2
    return n_divisors
def lowercase_ ():
    """Project Euler 12: return the first triangular number with more than 500 divisors.

    NOTE(review): ``i``/``t_num`` are read before being bound (the initial
    values go to throwaway locals), and ``count_divisors``/``lowercase__``/
    ``solution`` are unresolved names — the divisor counter is presumably the
    function defined just above.  Machine-mangled identifiers.
    """
    snake_case__ : str = 1
    snake_case__ : Any = 1
    while True:
        # Next triangular number: T(i) = T(i-1) + i.
        i += 1
        t_num += i
        if count_divisors(lowercase__ ) > 5_0_0:
            break
    return t_num
if __name__ == "__main__":
    print(solution())
| 277 |
"""simple docstring"""
def _snake_case ( boundary , steps ):
    """Extended trapezoidal rule for the module-level integrand ``f``.

    Approximates int(f) over ``boundary = [a, b]`` with ``steps`` panels:
    int(f) ~= h/2 * (f(a) + 2*f(x_1) + ... + 2*f(x_{n-1}) + f(b)).

    Fixes over the original block: the signature repeated one mangled
    parameter name (a SyntaxError) and discarded every intermediate value;
    real names are restored from the body's own variable reads.  Relies on
    the module-level ``f`` and ``make_points`` helpers.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : str = a + h
while x < (b - h):
yield x
_lowerCamelCase : int = x + h
def _snake_case ( lowercase__ ): # enter your function here
_lowerCamelCase : Optional[Any] = (x - 0) * (x - 0)
return y
def _snake_case ( ):
    """Demo driver: integrate the integrand over [0, 1] with 10 steps and print y.

    Fixes over the original block: locals were assigned to a throwaway name
    yet read back as ``a``/``b``/``boundary`` (restored here), and the
    trailing non-Python residue on the last line was removed.

    NOTE(review): ``method_a`` (and ``main`` in the guard) are unresolved —
    presumably the trapezoidal-rule function defined above, whose name was
    mangled away.
    """
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary , steps )
    print(f'''y = {y}''' )


if __name__ == "__main__":
    main()
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
    """Configuration container for composite encoder-decoder models.

    NOTE(review): identifiers look machine-mangled — values are assigned to
    throwaway ``a__`` locals instead of ``self`` attributes, ``kwargs``/
    ``encoder_config``/``decoder_config``/``output`` are read unresolved, and
    the classmethod repeats the parameter name ``lowercase`` (a SyntaxError).
    """
    # Model type tag and composition flag.
    a__ = """encoder-decoder"""
    a__ = True
    def __init__( self , **lowercase) -> Tuple:
        '''Build from ``encoder``/``decoder`` sub-config dicts passed in kwargs.'''
        super().__init__(**lowercase)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        a__: int = kwargs.pop('encoder')
        a__: Tuple = encoder_config.pop('model_type')
        a__: int = kwargs.pop('decoder')
        a__: List[str] = decoder_config.pop('model_type')
        # Local import avoids a circular dependency with the auto-config module.
        from ..auto.configuration_auto import AutoConfig
        a__: Optional[int] = AutoConfig.for_model(lowercase , **lowercase)
        a__: Tuple = AutoConfig.for_model(lowercase , **lowercase)
        a__: Dict = True
    @classmethod
    def lowerCamelCase_ ( cls , lowercase , lowercase , **lowercase) -> Optional[int]:
        '''Instantiate from two pretrained sub-configs, marking the decoder side.'''
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        a__: Optional[Any] = True
        a__: Dict = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowercase)
    def lowerCamelCase_ ( self) -> Dict:
        '''Serialise to a plain dict, nesting the encoder/decoder dicts.'''
        a__: Optional[Any] = copy.deepcopy(self.__dict__)
        a__: Dict = self.encoder.to_dict()
        a__: Optional[Any] = self.decoder.to_dict()
        a__: List[str] = self.__class__.model_type
        return output
| 290 |
"""simple docstring"""
import math
def _snake_case ( lowercase__ ):
return math.sqrt(lowercase__ ) * math.sqrt(lowercase__ ) == num
def _snake_case ( lowercase__ ):
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[Any] = n
while left <= right:
_lowerCamelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_lowerCamelCase : str = mid - 1
else:
_lowerCamelCase : Optional[int] = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 | 0 |
import numpy as np
def lowerCAmelCase_ ( f , ya , xa , x_end , h ):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y).

    Starts from y(xa) = ya and advances with fixed step ``h`` until
    ``x_end``; returns the numpy array of successive y values (length n+1).

    Fixes over the original block: the signature repeated one mangled
    parameter name five times (a SyntaxError) and every intermediate slope
    was assigned to the same throwaway local; names are restored from the
    body's own variable reads (standard RK4 — review parameter order at call
    sites).
    """
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # Four slope samples: start, two midpoints, end of the step.
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 201 |
"""simple docstring"""
import functools
from typing import Any
def _snake_case ( lowercase__ , lowercase__ ):
# Validation
if not isinstance(lowercase__ , lowercase__ ) or len(lowercase__ ) == 0:
raise ValueError('the string should be not empty string' )
if not isinstance(lowercase__ , lowercase__ ) or not all(
isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) > 0 for item in words ):
raise ValueError('the words should be a list of non-empty strings' )
# Build trie
_lowerCamelCase : dict[str, Any] = {}
_lowerCamelCase : List[Any] = 'WORD_KEEPER'
for word in words:
_lowerCamelCase : Dict = trie
for c in word:
if c not in trie_node:
_lowerCamelCase : Any = {}
_lowerCamelCase : str = trie_node[c]
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Dict = len(lowercase__ )
# Dynamic programming method
@functools.cache
def is_breakable(lowercase__ ) -> bool:
if index == len_string:
return True
_lowerCamelCase : List[Any] = trie
for i in range(lowercase__ , lowercase__ ):
_lowerCamelCase : Any = trie_node.get(string[i] , lowercase__ )
if trie_node is None:
return False
if trie_node.get(lowercase__ , lowercase__ ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 | 0 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
    """Deprecated alias kept for backwards compatibility; use ``DeiTImageProcessor``.

    NOTE(review): ``__init__`` declares ``*__snake_case , **__snake_case``
    with the same name — a SyntaxError; the two must be distinct.  The base
    class placeholder is also unresolved (machine-mangled identifiers).
    """
    def __init__( self , *__snake_case , **__snake_case ) -> str:
        '''Emit the deprecation warning, then defer to the parent initialiser.'''
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DeiTImageProcessor instead.' , __snake_case , )
        super().__init__(*__snake_case , **__snake_case )
| 218 |
"""simple docstring"""
def _snake_case ( lowercase__ ):
if not isinstance(lowercase__ , lowercase__ ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(lowercase__ ) == 0:
raise ValueError('Input list must be a non empty list' )
if len(lowercase__ ) == 1:
return True
_lowerCamelCase : List[Any] = series[1] - series[0]
for index in range(len(lowercase__ ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def _snake_case ( lowercase__ ):
if not isinstance(lowercase__ , lowercase__ ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(lowercase__ ) == 0:
raise ValueError('Input list must be a non empty list' )
_lowerCamelCase : Optional[int] = 0
for val in series:
answer += val
return answer / len(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class __lowerCAmelCase ( A ):
    """Configuration class for EfficientFormer models.

    NOTE(review): identifiers look machine-mangled — every ``__init__``
    parameter is named ``A`` (duplicate argument names are a SyntaxError in
    Python), mutable list defaults are shared across calls, and each value is
    assigned to the throwaway local ``_UpperCAmelCase`` instead of a ``self``
    attribute.  The free names read on the right-hand sides (``hidden_act``
    etc.) are presumably the intended parameter names.
    """
    UpperCamelCase = '''efficientformer'''
    def __init__( self : Optional[Any] , A : Union[str, Any] = [3, 2, 6, 4] , A : Tuple = [48, 96, 2_24, 4_48] , A : Any = [True, True, True, True] , A : int = 4_48 , A : Tuple = 32 , A : Optional[Any] = 4 , A : List[Any] = 7 , A : str = 5 , A : List[str] = 8 , A : str = 4 , A : Optional[Any] = 0.0 , A : List[str] = 16 , A : List[str] = 3 , A : str = 3 , A : Optional[Any] = 3 , A : List[Any] = 2 , A : List[Any] = 1 , A : str = 0.0 , A : str = 1 , A : Optional[Any] = True , A : List[Any] = True , A : List[str] = 1E-5 , A : Optional[int] = "gelu" , A : str = 0.0_2 , A : int = 1E-12 , A : Tuple = 2_24 , A : List[str] = 1E-05 , **A : str , ) -> List[Any]:
        '''Store all architecture hyper-parameters on the configuration.'''
        super().__init__(**A)
        _UpperCAmelCase = hidden_act
        _UpperCAmelCase = hidden_dropout_prob
        _UpperCAmelCase = hidden_sizes
        _UpperCAmelCase = num_hidden_layers
        _UpperCAmelCase = num_attention_heads
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = layer_norm_eps
        _UpperCAmelCase = patch_size
        _UpperCAmelCase = num_channels
        _UpperCAmelCase = depths
        _UpperCAmelCase = mlp_expansion_ratio
        _UpperCAmelCase = downsamples
        _UpperCAmelCase = dim
        _UpperCAmelCase = key_dim
        _UpperCAmelCase = attention_ratio
        _UpperCAmelCase = resolution
        _UpperCAmelCase = pool_size
        _UpperCAmelCase = downsample_patch_size
        _UpperCAmelCase = downsample_stride
        _UpperCAmelCase = downsample_pad
        _UpperCAmelCase = drop_path_rate
        _UpperCAmelCase = num_metaad_blocks
        _UpperCAmelCase = distillation
        _UpperCAmelCase = use_layer_scale
        _UpperCAmelCase = layer_scale_init_value
        _UpperCAmelCase = image_size
        _UpperCAmelCase = batch_norm_eps
| 339 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase__ = 16
lowercase__ = 32
def _snake_case ( lowercase__ , lowercase__ = 16 , lowercase__ = "bert-base-cased" ):
    """Tokenise GLUE/MRPC and return (train, eval) dataloaders for an accelerator.

    NOTE(review): duplicate parameter names (a SyntaxError); the dataset is
    bound to a throwaway local yet read back as ``datasets``/``tokenizer``/
    ``tokenized_datasets`` — machine-mangled identifiers.
    """
    _lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(lowercase__ )
    _lowerCamelCase : Tuple = load_dataset('glue' , 'mrpc' )
    def tokenize_function(lowercase__ ):
        # max_length=None => use the model max length (it's actually the default)
        _lowerCamelCase : Union[str, Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase__ , max_length=lowercase__ )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    _lowerCamelCase : int = datasets.map(
        lowercase__ , batched=lowercase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=lowercase__ )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    _lowerCamelCase : Optional[int] = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(lowercase__ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(lowercase__ , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(lowercase__ , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    _lowerCamelCase : List[str] = DataLoader(
        tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
    _lowerCamelCase : int = DataLoader(
        tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
    return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ , lowercase__ ):
# Initialize accelerator
_lowerCamelCase : Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCamelCase : Optional[int] = config['lr']
_lowerCamelCase : Optional[int] = int(config['num_epochs'] )
_lowerCamelCase : Union[str, Any] = int(config['seed'] )
_lowerCamelCase : Optional[int] = int(config['batch_size'] )
_lowerCamelCase : Dict = args.model_name_or_path
set_seed(lowercase__ )
_lowerCamelCase, _lowerCamelCase : Optional[int] = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCamelCase : int = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
_lowerCamelCase : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowerCamelCase : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
_lowerCamelCase : str = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
_lowerCamelCase : Tuple = 1
_lowerCamelCase : List[Any] = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowerCamelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
_lowerCamelCase : Any = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
_lowerCamelCase : Union[str, Any] = 0
# We also need to keep track of the stating epoch so files are named properly
_lowerCamelCase : Dict = 0
# Now we train the model
_lowerCamelCase : Dict = evaluate.load('glue' , 'mrpc' )
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : str = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
_lowerCamelCase : List[Any] = model(**lowercase__ )
_lowerCamelCase : int = outputs.loss
_lowerCamelCase : Dict = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_lowerCamelCase : Union[str, Any] = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**lowercase__ )
_lowerCamelCase : Dict = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_lowerCamelCase, _lowerCamelCase : List[str] = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
_lowerCamelCase : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCamelCase : Dict = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
_lowerCamelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowercase__ )
_lowerCamelCase : Tuple = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
_lowerCamelCase : str = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ):
_lowerCamelCase : Any = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=lowercase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=lowercase__ , )
parser.add_argument(
'--output_dir' , type=lowercase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=lowercase__ , default=lowercase__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=lowercase__ , default=3 , help='Number of train epochs.' , )
_lowerCamelCase : Optional[Any] = parser.parse_args()
_lowerCamelCase : str = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main() | 96 | 0 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : List[Any]=7 ) -> Optional[int]:
_a : List[str] =None
if token is not None:
_a : Dict ={'Accept': 'application/vnd.github+json', 'Authorization': F"Bearer {token}"}
# The id of a workflow (not of a workflow run)
_a : int ='636036'
_a : str =F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
_a : Optional[int] =requests.get(lowercase__ ,headers=lowercase__ ).json()
return result["workflow_runs"]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ) -> Optional[Any]:
_a : Optional[Any] =get_daily_ci_runs(lowercase__ )
_a : List[str] =None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_a : Optional[Any] =workflow_run['id']
break
return workflow_run_id
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : List[str] ) -> Union[str, Any]:
_a : Optional[int] =get_last_daily_ci_runs(lowercase__ )
if workflow_run_id is not None:
_a : Tuple =get_artifacts_links(worflow_run_id=lowercase__ ,token=lowercase__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_a : Dict =artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase__ ,artifact_url=lowercase__ ,output_dir=lowercase__ ,token=lowercase__ )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ) -> int:
get_last_daily_ci_artifacts(lowercase__ ,lowercase__ ,lowercase__ )
_a : Any ={}
for artifact_name in artifact_names:
_a : Dict =os.path.join(lowercase__ ,F"{artifact_name}.zip" )
if os.path.isfile(lowercase__ ):
_a : Any ={}
with zipfile.ZipFile(lowercase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase__ ):
# read the file
with z.open(lowercase__ ) as f:
_a : Tuple =f.read().decode("""UTF-8""" )
return results
| 276 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """new-model"""
if is_tf_available():
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = NewModelConfig
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : List[str] = 'bert-base-cased'
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
_lowerCamelCase : List[str] = 'bert-base-cased'
_lowerCamelCase : int = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : int = TFAutoModelForPreTraining.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : int = TFAutoModelForCausalLM.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : str = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : str = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCamelCase : str = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : List[str] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
@require_tensorflow_probability
def A_ ( self ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
_lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
def A_ ( self ):
_lowerCamelCase : int = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
def A_ ( self ):
_lowerCamelCase : Any = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
def A_ ( self ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
_lowerCamelCase : List[str] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Optional[int] = copy.deepcopy(model.config )
_lowerCamelCase : Dict = ['FunnelBaseModel']
_lowerCamelCase : List[Any] = TFAutoModel.from_config(lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase )
_lowerCamelCase : Tuple = TFAutoModel.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
def A_ ( self ):
try:
AutoConfig.register('new-model' , lowercase )
_lowerCamelCase : Tuple = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowercase ):
auto_class.register(lowercase , lowercase )
auto_class.register(lowercase , lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
auto_class.register(lowercase , lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : Optional[Any] = BertModelTester(self ).get_config()
_lowerCamelCase : Dict = NewModelConfig(**tiny_config.to_dict() )
_lowerCamelCase : int = auto_class.from_config(lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase )
_lowerCamelCase : List[Any] = auto_class.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def A_ ( self ):
with self.assertRaisesRegex(
lowercase , 'bert-base is not a local folder and is not a valid model identifier' ):
_lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained('bert-base' )
def A_ ( self ):
with self.assertRaisesRegex(
lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_lowerCamelCase : str = TFAutoModel.from_pretrained(lowercase , revision='aaaaaa' )
def A_ ( self ):
with self.assertRaisesRegex(
lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
_lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def A_ ( self ):
with self.assertRaisesRegex(lowercase , 'Use `from_pt=True` to load this model' ):
_lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def A_ ( self ):
# Make sure we have cached the model.
_lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
_lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
_lowerCamelCase : int = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
_lowerCamelCase : List[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 96 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : Dict = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class a ( _lowerCamelCase ):
snake_case_ = "realm"
def __init__( self : int , lowercase_ : Optional[Any]=3_0522 , lowercase_ : List[str]=768 , lowercase_ : Any=128 , lowercase_ : int=12 , lowercase_ : int=12 , lowercase_ : Tuple=8 , lowercase_ : Optional[Any]=3072 , lowercase_ : List[Any]="gelu_new" , lowercase_ : List[str]=0.1 , lowercase_ : Any=0.1 , lowercase_ : List[Any]=512 , lowercase_ : Any=2 , lowercase_ : Optional[Any]=0.02 , lowercase_ : str=1e-12 , lowercase_ : Tuple=256 , lowercase_ : List[str]=10 , lowercase_ : List[str]=1e-3 , lowercase_ : int=5 , lowercase_ : Tuple=320 , lowercase_ : int=1335_3718 , lowercase_ : Optional[int]=5000 , lowercase_ : Union[str, Any]=1 , lowercase_ : List[str]=0 , lowercase_ : str=2 , **lowercase_ : Union[str, Any] , ):
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
# Common config
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = retriever_proj_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = num_candidates
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
# Reader config
snake_case_ = span_hidden_size
snake_case_ = max_span_width
snake_case_ = reader_layer_norm_eps
snake_case_ = reader_beam_size
snake_case_ = reader_seq_len
# Retrieval config
snake_case_ = num_block_records
snake_case_ = searcher_beam_size
| 56 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 96 | 0 |
'''simple docstring'''
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCamelCase = _modexpt(lowercase__ , exponent // 2 , lowercase__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase__ , exponent - 1 , lowercase__ )) % modulo_value
def lowercase__ ( __UpperCamelCase = 1777 , __UpperCamelCase = 1855 , __UpperCamelCase = 8 )-> int:
UpperCamelCase = base
for _ in range(1 , lowercase__ ):
UpperCamelCase = _modexpt(lowercase__ , lowercase__ , 10**digits )
return result
if __name__ == "__main__":
print(f'{solution() = }')
| 321 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Tuple = f'''{sampling_rate}'''
_lowerCamelCase : str = '1'
_lowerCamelCase : str = 'f32le'
_lowerCamelCase : Union[str, Any] = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(lowercase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_lowerCamelCase : str = ffmpeg_process.communicate(lowercase__ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
_lowerCamelCase : List[Any] = output_stream[0]
_lowerCamelCase : Tuple = np.frombuffer(lowercase__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def _snake_case ( lowercase__ , lowercase__ , lowercase__ = "f32le" , ):
_lowerCamelCase : Optional[Any] = f'''{sampling_rate}'''
_lowerCamelCase : List[str] = '1'
if format_for_conversion == "s16le":
_lowerCamelCase : List[str] = 2
elif format_for_conversion == "f32le":
_lowerCamelCase : List[Any] = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
_lowerCamelCase : Dict = platform.system()
if system == "Linux":
_lowerCamelCase : Optional[int] = 'alsa'
_lowerCamelCase : Optional[Any] = 'default'
elif system == "Darwin":
_lowerCamelCase : Optional[int] = 'avfoundation'
_lowerCamelCase : Any = ':0'
elif system == "Windows":
_lowerCamelCase : Tuple = 'dshow'
_lowerCamelCase : Tuple = 'default'
_lowerCamelCase : Optional[int] = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
_lowerCamelCase : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_lowerCamelCase : List[Any] = _ffmpeg_stream(lowercase__ , lowercase__ )
for item in iterator:
yield item
def _snake_case ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = "f32le" , ):
if stream_chunk_s is not None:
_lowerCamelCase : int = stream_chunk_s
else:
_lowerCamelCase : Optional[Any] = chunk_length_s
_lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowercase__ , lowercase__ , format_for_conversion=lowercase__ )
if format_for_conversion == "s16le":
_lowerCamelCase : List[str] = np.intaa
_lowerCamelCase : str = 2
elif format_for_conversion == "f32le":
_lowerCamelCase : Any = np.floataa
_lowerCamelCase : List[Any] = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
_lowerCamelCase : Union[str, Any] = chunk_length_s / 6
_lowerCamelCase : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowercase__ , (int, float) ):
_lowerCamelCase : Any = [stride_length_s, stride_length_s]
_lowerCamelCase : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_lowerCamelCase : Optional[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_lowerCamelCase : List[Any] = datetime.datetime.now()
_lowerCamelCase : Optional[int] = datetime.timedelta(seconds=lowercase__ )
for item in chunk_bytes_iter(lowercase__ , lowercase__ , stride=(stride_left, stride_right) , stream=lowercase__ ):
# Put everything back in numpy scale
_lowerCamelCase : List[Any] = np.frombuffer(item['raw'] , dtype=lowercase__ )
_lowerCamelCase : int = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
_lowerCamelCase : Optional[int] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ):
_lowerCamelCase : int = B''
_lowerCamelCase, _lowerCamelCase : Dict = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
_lowerCamelCase : str = 0
for raw in iterator:
acc += raw
if stream and len(lowercase__ ) < chunk_len:
_lowerCamelCase : Optional[int] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowercase__ ) >= chunk_len:
# We are flushing the accumulator
_lowerCamelCase : str = (_stride_left, stride_right)
_lowerCamelCase : str = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
_lowerCamelCase : List[Any] = False
yield item
_lowerCamelCase : Optional[Any] = stride_left
_lowerCamelCase : str = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowercase__ ) > stride_left:
_lowerCamelCase : Optional[Any] = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
_lowerCamelCase : Tuple = False
yield item
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : int = 2**24 # 16Mo
try:
with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process:
while True:
_lowerCamelCase : Optional[Any] = ffmpeg_process.stdout.read(lowercase__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error | 96 | 0 |
"""simple docstring"""
def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : Dict) -> Union[str, Any]:
'''simple docstring'''
__lowercase = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _A ( UpperCamelCase_ : List[Any], UpperCamelCase_ : str, UpperCamelCase_ : int) -> int:
'''simple docstring'''
__lowercase = 0
while b > 0:
if b & 1:
__lowercase = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 17 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """ctrl"""
lowerCamelCase__ = ["""past_key_values"""]
lowerCamelCase__ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase=246534 , lowercase=256 , lowercase=1280 , lowercase=8192 , lowercase=48 , lowercase=16 , lowercase=0.1 , lowercase=0.1 , lowercase=1E-6 , lowercase=0.02 , lowercase=True , **lowercase , ):
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : Dict = n_positions
_lowerCamelCase : Optional[int] = n_embd
_lowerCamelCase : str = n_layer
_lowerCamelCase : Union[str, Any] = n_head
_lowerCamelCase : Any = dff
_lowerCamelCase : int = resid_pdrop
_lowerCamelCase : Dict = embd_pdrop
_lowerCamelCase : Union[str, Any] = layer_norm_epsilon
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : str = use_cache
super().__init__(**lowercase ) | 96 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase ):
_lowerCamelCase : Any = data
_lowerCamelCase : Node | None = None
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self ):
_lowerCamelCase : str = None
_lowerCamelCase : str = None
def __iter__( self ):
_lowerCamelCase : List[str] = self.head
while self.head:
yield node.data
_lowerCamelCase : Optional[int] = node.next
if node == self.head:
break
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join(str(lowercase ) for item in iter(self ) )
def A_ ( self , lowercase ):
self.insert_nth(len(self ) , lowercase )
def A_ ( self , lowercase ):
self.insert_nth(0 , lowercase )
def A_ ( self , lowercase , lowercase ):
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
_lowerCamelCase : List[Any] = Node(lowercase )
if self.head is None:
_lowerCamelCase : str = new_node # first node points itself
_lowerCamelCase : Union[str, Any] = new_node
elif index == 0: # insert at head
_lowerCamelCase : List[str] = self.head
_lowerCamelCase : str = new_node
else:
_lowerCamelCase : Union[str, Any] = self.head
for _ in range(index - 1 ):
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : Union[str, Any] = temp.next
_lowerCamelCase : List[str] = new_node
if index == len(self ) - 1: # insert at tail
_lowerCamelCase : Any = new_node
def A_ ( self ):
return self.delete_nth(0 )
def A_ ( self ):
return self.delete_nth(len(self ) - 1 )
def A_ ( self , lowercase = 0 ):
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
_lowerCamelCase : Any = self.head
if self.head == self.tail: # just one node
_lowerCamelCase : List[str] = None
elif index == 0: # delete head node
_lowerCamelCase : List[str] = self.tail.next.next
_lowerCamelCase : Optional[int] = self.head.next
else:
_lowerCamelCase : Dict = self.head
for _ in range(index - 1 ):
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : int = temp.next
_lowerCamelCase : Optional[int] = temp.next.next
if index == len(self ) - 1: # delete at tail
_lowerCamelCase : List[Any] = temp
return delete_node.data
def A_ ( self ):
return len(self ) == 0
def _snake_case( ):
    """End-to-end smoke test exercising CircularLinkedList.

    BUG FIX: the list instance was bound to a throwaway local while every
    statement below reads ``circular_linked_list``, and several reads were
    garbled to ``lowercase__``; bindings restored.
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )

    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3

    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    # (Stray dataset-table artifact removed from the last line.)
    import doctest

    doctest.testmod()
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any ) -> List[str]:
lowerCamelCase_ = from_type.lower().strip('s' )
lowerCamelCase_ = to_type.lower().strip('s' )
lowerCamelCase_ = UNIT_SYMBOL.get(lowercase__ , lowercase__ )
lowerCamelCase_ = UNIT_SYMBOL.get(lowercase__ , lowercase__ )
if from_sanitized not in METRIC_CONVERSION:
lowerCamelCase_ = (
F'''Invalid \'from_type\' value: {from_type!r}.\n'''
F'''Conversion abbreviations are: {", ".join(lowercase__ )}'''
)
raise ValueError(lowercase__ )
if to_sanitized not in METRIC_CONVERSION:
lowerCamelCase_ = (
F'''Invalid \'to_type\' value: {to_type!r}.\n'''
F'''Conversion abbreviations are: {", ".join(lowercase__ )}'''
)
raise ValueError(lowercase__ )
lowerCamelCase_ = METRIC_CONVERSION[from_sanitized]
lowerCamelCase_ = METRIC_CONVERSION[to_sanitized]
lowerCamelCase_ = 1
if from_exponent > to_exponent:
lowerCamelCase_ = from_exponent - to_exponent
else:
lowerCamelCase_ = -(to_exponent - from_exponent)
return value * pow(10 , lowercase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 183 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
# Module logger (bound to a garbled name; nothing below reads it).
lowercase__ = get_logger(__name__)
class lowerCAmelCase__ :
    """Mock of the datasets DownloadManager that serves pre-packaged
    ``dummy_data.zip`` fixtures instead of downloading real data.

    BUG FIX throughout: every method was named ``A_`` (later defs shadowed
    earlier ones), attribute/variable writes were collapsed onto a throwaway
    local, and ``__init__`` repeated one parameter name (a SyntaxError).
    Names are restored from the attribute reads and call sites visible in
    this block.  (A stray dataset-table artifact was removed from the last
    line.)
    """

    # Read below as self.dummy_file_name / self.datasets_scripts_dir.
    dummy_file_name = """dummy_data"""
    datasets_scripts_dir = """datasets"""
    is_local = False  # NOTE(review): unread in this block — confirm the attribute name

    def __init__(
        self ,
        dataset_name ,
        config ,
        version ,
        cache_dir = None ,
        use_local_dummy_data = False ,
        load_existing_dummy_data = True ,
        download_callbacks = None ,
    ):
        self.downloaded_size = 0  # NOTE(review): unread here — presumably updated by callers
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file( self ):
        """Path to the (lazily downloaded/extracted) dummy data."""
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder( self ):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('dummy' , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join('dummy' , self.version_name )

    @property
    def dummy_zip_file( self ):
        return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )

    def download_dummy_data( self ):
        """Fetch and extract the dummy zip, returning its local root path."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )

    @property
    def local_path_to_dummy_data( self ):
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def github_path_to_dummy_data( self ):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
        return self._bucket_url

    @property
    def manual_dir( self ):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )

    def download_and_extract( self , data_url , *args ):
        """Map real URLs (str, list/tuple or dict) onto dummy-data paths."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict ):
            return self.create_dummy_data_dict(dummy_file , data_url )
        elif isinstance(data_url , (list, tuple) ):
            return self.create_dummy_data_list(dummy_file , data_url )
        else:
            return self.create_dummy_data_single(dummy_file , data_url )

    def download( self , data_url , *args ):
        return self.download_and_extract(data_url )

    def download_custom( self , data_url , custom_download ):
        return self.download_and_extract(data_url )

    def extract( self , path , *args , **kwargs ):
        # Dummy data is already extracted; extraction is a no-op.
        return path

    def get_recorded_sizes_checksums( self ):
        return {}

    def create_dummy_data_dict( self , path_to_dummy_data , data_url ):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list( self , path_to_dummy_data , data_url ):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list

    def create_dummy_data_single( self , path_to_dummy_data , data_url ):
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files( self ):
        pass

    def manage_extracted_files( self ):
        pass

    def iter_archive( self , path ):
        """Yield (relative posix path, file object) pairs for archive members."""
        def _iter_archive_members(path ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )

        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob('*' )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
                yield file_path.relative_to(path ).as_posix(), file_path.open('rb' )

    def iter_files( self , paths ):
        """Yield files under the given path(s), skipping hidden/dunder names."""
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith(('.', '__') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith(('.', '__') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith(('.', '__') ):
                            continue
                        yield os.path.join(dirpath , filename )
import unittest
from transformers import DonutProcessor
# Donut checkpoint id used by the processor test below (referenced there via
# a garbled name — verify before running).
a_ :Optional[Any] = "naver-clova-ix/donut-base"
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Optional[int] ) ->Tuple:
snake_case__ : Union[str, Any] = DonutProcessor.from_pretrained(_snake_case )
def lowercase_ ( self : Tuple ) ->int:
snake_case__ : str = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
snake_case__ : int = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
snake_case__ : str = self.processor.tokenajson(_snake_case )
self.assertDictEqual(_snake_case, _snake_case )
| 277 |
"""simple docstring"""
def stooge_sort(lowercase__ ):
    """Sort ``lowercase__`` in place with stooge sort and return it.

    >>> stooge_sort([2, 4, 5, 3, 1])
    [1, 2, 3, 4, 5]
    """
    _snake_case(lowercase__ , 0 , len(lowercase__ ) - 1 )
    return lowercase__


def _snake_case(arr , i , h ):
    """Recursively stooge-sort ``arr[i:h + 1]`` in place.

    BUG FIX: the original declared three parameters with one name (a
    SyntaxError), swapped into a dead local instead of the list, and called
    the recursion under the unbound name ``stooge``; names restored from the
    body's own reads (``arr``, ``i``, ``h``).
    """
    if i >= h:
        return
    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        _snake_case(arr , i , h - t )
        # Recursively sort last 2/3 elements
        _snake_case(arr , i + t , h )
        # Recursively sort first 2/3 elements
        _snake_case(arr , i , h - t )


# Older call sites in this file refer to the recursive helper as ``stooge``.
stooge = _snake_case
if __name__ == "__main__":
    # BUG FIX: both locals were bound to one garbled name while being read
    # as ``user_input``/``unsorted`` below (a NameError as written).
    # NOTE(review): ``stooge_sort`` must be defined by the sorter above.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(stooge_sort(unsorted))
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __snake_case :
@staticmethod
def lowerCamelCase_ ( *lowercase , **lowercase) -> List[str]:
'''simple docstring'''
pass
def __a ( _SCREAMING_SNAKE_CASE ) ->List[str]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowercase__ = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)

# BUG FIX: the tests read this constant as ``INVOICE_URL`` but it was only
# bound to a garbled name; keep both bindings.
INVOICE_URL = lowercase__
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    """Pipeline tests for ``document-question-answering`` models.

    NOTE(review): this block appears machine-garbled — results are bound to a
    throwaway ``a__`` local but read back under their original names
    (``dqa_pipeline``, ``image``, ``question``, ``word_boxes``, ``outputs``,
    ``expected_output``), and several signatures repeat the parameter name
    ``lowercase``, which is a SyntaxError.  Documented as-is; restore the
    bindings before use.
    """

    # Model mapping consumed by the pipeline test mixin (garbled attribute name).
    a__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Tuple:
        """Build a dqa pipeline plus example inputs (image / url / word boxes)."""
        a__: Optional[Any] = pipeline(
            'document-question-answering' , model=lowercase , tokenizer=lowercase , image_processor=lowercase)

        a__: Dict = INVOICE_URL
        a__: List[str] = list(zip(*apply_tesseract(load_image(lowercase) , lowercase , '')))
        a__: Optional[Any] = 'What is the placebo?'
        a__: List[Any] = [
            {
                'image': load_image(lowercase),
                'question': question,
            },
            {
                'image': image,
                'question': question,
            },
            {
                'image': image,
                'question': question,
                'word_boxes': word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def lowerCamelCase_ ( self , lowercase , lowercase) -> Dict:
        """Run the pipeline on the examples and check the answer schema."""
        a__: str = dqa_pipeline(lowercase , top_k=2)
        self.assertEqual(
            lowercase , [
                [
                    {'score': ANY(lowercase), 'answer': ANY(lowercase), 'start': ANY(lowercase), 'end': ANY(lowercase)},
                    {'score': ANY(lowercase), 'answer': ANY(lowercase), 'start': ANY(lowercase), 'end': ANY(lowercase)},
                ]
            ]
            * 3 , )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def lowerCamelCase_ ( self) -> Tuple:
        """Small random LayoutLMv2 checkpoint; checks outputs incl. empty image."""
        a__: List[Any] = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2')
        a__: Optional[Any] = INVOICE_URL
        a__: List[Any] = 'How many cats are there?'

        a__: Optional[int] = [
            {'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
            {'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
        ]
        a__: str = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
        self.assertEqual(nested_simplify(lowercase , decimals=4) , lowercase)

        a__: int = dqa_pipeline({'image': image, 'question': question} , top_k=2)
        self.assertEqual(nested_simplify(lowercase , decimals=4) , lowercase)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        a__: List[str] = './tests/fixtures/tests_samples/COCO/000000039769.png'
        a__: List[str] = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
        self.assertEqual(lowercase , [])

        # We can optionnally pass directly the words and bounding boxes
        a__: Optional[Any] = './tests/fixtures/tests_samples/COCO/000000039769.png'
        a__: Dict = []
        a__: int = []
        a__: Any = dqa_pipeline(image=lowercase , question=lowercase , words=lowercase , boxes=lowercase , top_k=2)
        self.assertEqual(lowercase , [])

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def lowerCamelCase_ ( self) -> str:
        """Fine-tuned LayoutLMv2 DocVQA checkpoint (pinned revision)."""
        a__: Optional[int] = pipeline(
            'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
        a__: Union[str, Any] = INVOICE_URL
        a__: str = 'What is the invoice number?'

        a__: Dict = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )

        a__: Optional[Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )

        a__: Optional[int] = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                [
                    {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
                ],
            ]
            * 2 , )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def lowerCamelCase_ ( self) -> Optional[Any]:
        """Same checkpoint with a short max_seq_len to exercise truncation."""
        a__: Optional[Any] = pipeline(
            'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
        a__: Optional[int] = INVOICE_URL
        a__: List[Any] = 'What is the invoice number?'

        a__: Tuple = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )

        a__: Dict = dqa_pipeline({'image': image, 'question': question} , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )

        a__: List[Any] = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                [
                    {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                    {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
                ]
            ]
            * 2 , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def lowerCamelCase_ ( self) -> Tuple:
        """LayoutLM (v1) document-qa model; also tests the word_boxes path."""
        a__: List[Any] = AutoTokenizer.from_pretrained(
            'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowercase)
        a__: Tuple = pipeline(
            'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowercase , revision='3dc6de3' , )
        a__: Optional[Any] = INVOICE_URL
        a__: Tuple = 'What is the invoice number?'

        a__: List[Any] = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ] , )

        a__: Optional[int] = dqa_pipeline({'image': image, 'question': question} , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ] , )

        a__: int = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                [
                    {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
                ]
            ]
            * 2 , )

        a__: List[Any] = list(zip(*apply_tesseract(load_image(lowercase) , lowercase , '')))

        # This model should also work if `image` is set to None
        a__: Optional[Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ] , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def lowerCamelCase_ ( self) -> Tuple:
        """LayoutLM (v1) with max_seq_len=50; word_boxes path included."""
        a__: Union[str, Any] = AutoTokenizer.from_pretrained(
            'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowercase)
        a__: Optional[Any] = pipeline(
            'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowercase , revision='3dc6de3' , max_seq_len=50 , )
        a__: Any = INVOICE_URL
        a__: Tuple = 'What is the invoice number?'

        a__: Any = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )

        a__: Any = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                [
                    {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
                ]
            ]
            * 2 , )

        a__: Optional[int] = list(zip(*apply_tesseract(load_image(lowercase) , lowercase , '')))

        # This model should also work if `image` is set to None
        a__: Union[str, Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase , decimals=4) , [
                {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
            ] , )

    @slow
    @require_torch
    def lowerCamelCase_ ( self) -> Optional[int]:
        """Donut (generative) DocVQA model; only the answer text is compared."""
        a__: Optional[Any] = pipeline(
            'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa') , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
        a__: List[Any] = INVOICE_URL
        a__: Any = 'What is the invoice number?'
        a__: Dict = dqa_pipeline(image=lowercase , question=lowercase , top_k=2)
        self.assertEqual(nested_simplify(lowercase , decimals=4) , [{'answer': 'us-001'}])

    @require_tf
    @unittest.skip('Document question answering not implemented in TF')
    def lowerCamelCase_ ( self) -> List[str]:
        """Placeholder: no TF implementation of this pipeline."""
        pass
| 290 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__ ( ProcessorMixin ):
    r"""Processor for InstructBLIP-style models: bundles a Blip image
    processor with a main tokenizer plus a Q-Former tokenizer.

    BUG FIX throughout: class attributes, the base class and every
    local/instance write were bound to garbled names (several signatures
    also repeated a parameter name, a SyntaxError); names restored from the
    reads visible in this block (``self.qformer_tokenizer``, ``encoding``,
    ``{save_directory}`` in the error message, the ``ProcessorMixin``
    import).  A stray dataset-table artifact was removed from the last line.
    """

    # ProcessorMixin contract: these exact attribute names are required.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__( self , image_processor , tokenizer , qformer_tokenizer ):
        super().__init__(image_processor , tokenizer )

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self ,
        images = None ,
        text = None ,
        add_special_tokens = True ,
        padding = False ,
        truncation = None ,
        max_length = None ,
        stride = 0 ,
        pad_to_multiple_of = None ,
        return_attention_mask = None ,
        return_overflowing_tokens = False ,
        return_special_tokens_mask = False ,
        return_offsets_mapping = False ,
        return_token_type_ids = False ,
        return_length = False ,
        verbose = True ,
        return_tensors = None ,
        **kwargs ,
    ):
        """Tokenize ``text`` with both tokenizers and preprocess ``images``,
        returning one ``BatchFeature``.

        Raises:
            ValueError: if neither ``images`` nor ``text`` is given.
        """
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.' )

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text ,
                add_special_tokens=add_special_tokens ,
                padding=padding ,
                truncation=truncation ,
                max_length=max_length ,
                stride=stride ,
                pad_to_multiple_of=pad_to_multiple_of ,
                return_attention_mask=return_attention_mask ,
                return_overflowing_tokens=return_overflowing_tokens ,
                return_special_tokens_mask=return_special_tokens_mask ,
                return_offsets_mapping=return_offsets_mapping ,
                return_token_type_ids=return_token_type_ids ,
                return_length=return_length ,
                verbose=verbose ,
                return_tensors=return_tensors ,
                **kwargs ,
            )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text ,
                add_special_tokens=add_special_tokens ,
                padding=padding ,
                truncation=truncation ,
                max_length=max_length ,
                stride=stride ,
                pad_to_multiple_of=pad_to_multiple_of ,
                return_attention_mask=return_attention_mask ,
                return_overflowing_tokens=return_overflowing_tokens ,
                return_special_tokens_mask=return_special_tokens_mask ,
                return_offsets_mapping=return_offsets_mapping ,
                return_token_type_ids=return_token_type_ids ,
                return_length=return_length ,
                verbose=verbose ,
                return_tensors=return_tensors ,
                **kwargs ,
            )
            # Store the Q-Former stream under distinct keys so it does not
            # clash with the main tokenizer's output.
            encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids' )
            encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask' )

        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )

        return encoding

    def batch_decode( self , *args , **kwargs ):
        """Forward to the main tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the main tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def save_pretrained( self , save_directory , **kwargs ):
        """Save the Q-Former tokenizer into a subfolder, then the rest."""
        if os.path.isfile(save_directory ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """Load the components, incl. the Q-Former tokenizer from its
        ``qformer_tokenizer`` subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    # BUG FIX: every result below was bound to one garbled name while the
    # plotting section reads ``X``, ``young``, ``union`` …; names restored
    # from those reads.
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 25, 50]
    abcb = [25, 50, 75]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)
    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 201 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
def _snake_case(lowercase__ ):
    """Load and return the dict stored in ``all_results.json`` under the
    directory *lowercase__*.

    Raises:
        ValueError: if the file does not exist.
    """
    # BUG FIX: locals were collapsed onto a throwaway name, leaving
    # ``results`` and the joined ``path`` unbound at their reads.
    results = {}
    path = os.path.join(lowercase__ , 'all_results.json' )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'''can\'t find {path}''' )
    return results


# The test class below refers to this helper by its real name.
get_results = _snake_case
# Mirror log records on stdout so test runners capture them.
# BUG FIX: the handler was bound to a garbled name while the next line
# reads ``stream_handler``.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase__ ( TestCasePlus ):
    """Slow end-to-end TPU tests that launch scripts through ``xla_spawn``.

    BUG FIX: locals were collapsed onto one throwaway name, both methods
    were called ``A_`` (the second shadowed the first and neither was
    discovered by unittest), the base class was garbled (``TestCasePlus``
    restored from the file's imports), and a stray dataset-table artifact
    sat on the last line.  Local names restored from the reads below.
    """

    def test_run_glue_with_xla_spawn( self ):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            '''.split()

        with patch.object(sys , 'argv' , testargs ):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 500 )

    def test_trainer_tpu( self ):
        import xla_spawn

        testargs = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(sys , 'argv' , testargs ):
            xla_spawn.main()
from __future__ import annotations
def is_palindrome(n):
    """Return True if the decimal/string form of *n* reads the same backwards."""
    s = str(n)
    return s == s[::-1]


def solution(limit=1000000):
    """Project Euler 36: sum of numbers below *limit* that are palindromic in
    both base 10 and base 2.

    ``bin(i)`` never has leading zeros, so numbers whose binary form ends in 0
    can never qualify; the plain range scan preserves the original behavior.

    (Both functions were named ``UpperCamelCase_`` by the obfuscation while
    the call sites used ``is_palindrome``/``solution`` — restored here.)
    """
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split('b')[1]):
            total += i
    return total


# Preserve the obfuscated module-level name (originally bound to the 2nd def).
UpperCamelCase_ = solution

if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
| 218 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a, input_b):
    """Euclidean distance between two equal-length vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset, value_array):
    """For every vector in *value_array*, find the nearest vector in *dataset*.

    Args:
        dataset: 2-D numpy array of candidate vectors.
        value_array: 2-D numpy array of query vectors.

    Returns:
        list of ``[nearest_vector_as_list, distance]`` pairs, one per query.

    Raises:
        ValueError: on mismatched dimensions or shapes.
        TypeError: on mismatched shapes (1-D case) or dtypes.

    (The obfuscation gave all three functions here the same name and duplicate
    parameter names — a SyntaxError — while this body called the then-undefined
    name ``euclidean``; real names are restored.)
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg)
    except IndexError:
        # 1-D inputs have no shape[1]; mirror the original's (odd) re-check.
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape')

    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        # Linear scan keeping the closest candidate seen so far.
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a, input_b):
    """Cosine similarity (dot product over the product of norms)."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


# Preserve the obfuscated name (originally bound to the last definition).
_snake_case = cosine_similarity

if __name__ == "__main__":
    import doctest

    doctest.testmod()
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(A):
    """Custom config type for the ``new-model`` architecture used by the
    auto-class registration tests below (which reference ``NewModelConfig``
    by name — the obfuscated original named this class ``__lowerCAmelCase``,
    leaving ``NewModelConfig`` undefined).

    NOTE(review): the base ``A`` and the attribute name ``UpperCamelCase``
    are obfuscated residue (presumably ``BertConfig`` / ``model_type``) —
    confirm against the original file.
    """

    UpperCamelCase = '''new-model'''


if is_tf_available():

    class TFNewModel(A):
        """TF model type paired with :class:`NewModelConfig` above.

        NOTE(review): attribute name is obfuscated (presumably
        ``config_class``) — confirm against the original file.
        """

        UpperCamelCase = NewModelConfig
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
    """Integration tests for the TF auto-model classes.

    NOTE(review): machine-obfuscated block. Every method below is named
    ``_lowerCamelCase`` (so only the LAST definition survives at
    class-creation time), every local is rebound to ``_UpperCAmelCase``, and
    most value references were rewritten to the bare name ``A`` (undefined
    here). The docstrings describe the apparent intent of each test only;
    the original identifiers must be restored before any of this runs.
    """

    @slow
    def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
        """Auto-load the config and TF base model for bert-base-cased."""
        _UpperCAmelCase = 'bert-base-cased'
        _UpperCAmelCase = AutoConfig.from_pretrained(A)
        self.assertIsNotNone(A)
        self.assertIsInstance(A , A)
        _UpperCAmelCase = TFAutoModel.from_pretrained(A)
        self.assertIsNotNone(A)
        self.assertIsInstance(A , A)

    @slow
    def _lowerCamelCase ( self : int) -> Optional[int]:
        """Auto-load the pre-training head model for bert-base-cased."""
        _UpperCAmelCase = 'bert-base-cased'
        _UpperCAmelCase = AutoConfig.from_pretrained(A)
        self.assertIsNotNone(A)
        self.assertIsInstance(A , A)
        _UpperCAmelCase = TFAutoModelForPreTraining.from_pretrained(A)
        self.assertIsNotNone(A)
        self.assertIsInstance(A , A)

    @slow
    def _lowerCamelCase ( self : int) -> Optional[Any]:
        """Auto-load a causal-LM model (GPT-2 checkpoints), with loading info."""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase = AutoConfig.from_pretrained(A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)
            _UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained(A)
            _UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained(A , output_loading_info=A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)

    @slow
    def _lowerCamelCase ( self : Any) -> Tuple:
        """Auto-load an LM-head model (BERT checkpoints)."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase = AutoConfig.from_pretrained(A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)
            _UpperCAmelCase = TFAutoModelWithLMHead.from_pretrained(A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)

    @slow
    def _lowerCamelCase ( self : Dict) -> Dict:
        """Auto-load a masked-LM model (BERT checkpoints), with loading info."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase = AutoConfig.from_pretrained(A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)
            _UpperCAmelCase = TFAutoModelForMaskedLM.from_pretrained(A)
            _UpperCAmelCase = TFAutoModelForMaskedLM.from_pretrained(A , output_loading_info=A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)

    @slow
    def _lowerCamelCase ( self : List[str]) -> List[Any]:
        """Auto-load a seq2seq LM model (T5 checkpoints), with loading info."""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase = AutoConfig.from_pretrained(A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)
            _UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(A)
            _UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(A , output_loading_info=A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)

    @slow
    def _lowerCamelCase ( self : str) -> Union[str, Any]:
        """Auto-load a sequence-classification model (bert-base-uncased)."""
        for model_name in ["bert-base-uncased"]:
            _UpperCAmelCase = AutoConfig.from_pretrained(A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)
            _UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)

    @slow
    def _lowerCamelCase ( self : Any) -> str:
        """Auto-load a question-answering model (bert-base-uncased)."""
        for model_name in ["bert-base-uncased"]:
            _UpperCAmelCase = AutoConfig.from_pretrained(A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)
            _UpperCAmelCase = TFAutoModelForQuestionAnswering.from_pretrained(A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)

    @slow
    @require_tensorflow_probability
    def _lowerCamelCase ( self : int) -> Union[str, Any]:
        """Auto-load a table-QA model (TAPAS checkpoints), with loading info."""
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            _UpperCAmelCase = AutoConfig.from_pretrained(A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)
            _UpperCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(A)
            _UpperCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(
                A , output_loading_info=A)
            self.assertIsNotNone(A)
            self.assertIsInstance(A , A)

    def _lowerCamelCase ( self : Optional[Any]) -> Tuple:
        """Parameter counting on a tiny model: 14410 params, all trainable."""
        _UpperCAmelCase = TFAutoModelWithLMHead.from_pretrained(A)
        self.assertIsInstance(A , A)
        self.assertEqual(model.num_parameters() , 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=A) , 1_44_10)

    def _lowerCamelCase ( self : str) -> Optional[Any]:
        """Same parameter-count check as above (duplicate under obfuscation)."""
        _UpperCAmelCase = TFAutoModelWithLMHead.from_pretrained(A)
        self.assertIsInstance(A , A)
        self.assertEqual(model.num_parameters() , 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=A) , 1_44_10)

    def _lowerCamelCase ( self : Optional[int]) -> Optional[Any]:
        """A checkpoint mapped to a base architecture ('FunnelBaseModel')
        survives a save_pretrained/from_pretrained round-trip."""
        _UpperCAmelCase = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny')
        self.assertIsInstance(A , A)
        _UpperCAmelCase = copy.deepcopy(model.config)
        _UpperCAmelCase = ['FunnelBaseModel']
        _UpperCAmelCase = TFAutoModel.from_config(A)
        self.assertIsInstance(A , A)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(A)
            _UpperCAmelCase = TFAutoModel.from_pretrained(A)
            self.assertIsInstance(A , A)

    def _lowerCamelCase ( self : str) -> Optional[int]:
        """Register a custom config/model pair with every TF auto-class, use
        it end-to-end, and clean the registries in the `finally` block."""
        try:
            AutoConfig.register('new-model' , A)
            _UpperCAmelCase = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(A):
                        auto_class.register(A , A)
                    auto_class.register(A , A)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(A):
                        auto_class.register(A , A)
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    _UpperCAmelCase = BertModelTester(self).get_config()
                    _UpperCAmelCase = NewModelConfig(**tiny_config.to_dict())
                    _UpperCAmelCase = auto_class.from_config(A)
                    self.assertIsInstance(A , A)
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(A)
                        _UpperCAmelCase = auto_class.from_pretrained(A)
                        self.assertIsInstance(A , A)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def _lowerCamelCase ( self : int) -> Union[str, Any]:
        """Helpful error message when the repo id does not exist."""
        with self.assertRaisesRegex(
            A , 'bert-base is not a local folder and is not a valid model identifier'):
            _UpperCAmelCase = TFAutoModel.from_pretrained('bert-base')

    def _lowerCamelCase ( self : Dict) -> List[str]:
        """Helpful error message when the requested revision does not exist."""
        with self.assertRaisesRegex(
            A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            _UpperCAmelCase = TFAutoModel.from_pretrained(A , revision='aaaaaa')

    def _lowerCamelCase ( self : List[Any]) -> str:
        """Helpful error message when the repo has no model weight file."""
        with self.assertRaisesRegex(
            A , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
            _UpperCAmelCase = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model')

    def _lowerCamelCase ( self : Tuple) -> Any:
        """Helpful error when only PT weights exist and from_pt is not set."""
        with self.assertRaisesRegex(A , 'Use `from_pt=True` to load this model'):
            _UpperCAmelCase = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')

    def _lowerCamelCase ( self : List[Any]) -> Dict:
        """A second from_pretrained on a cached checkpoint should only issue
        HEAD requests (no GET), for plain and sharded checkpoints alike."""
        _UpperCAmelCase = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
        with RequestCounter() as counter:
            _UpperCAmelCase = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
            self.assertEqual(counter.get_request_count , 0)
            self.assertEqual(counter.head_request_count , 1)
            self.assertEqual(counter.other_request_count , 0)

        # With a sharded checkpoint
        _UpperCAmelCase = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
        with RequestCounter() as counter:
            _UpperCAmelCase = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
            self.assertEqual(counter.get_request_count , 0)
            self.assertEqual(counter.head_request_count , 1)
            self.assertEqual(counter.other_request_count , 0)
| 339 |
"""simple docstring"""
import socket
def main():
    """Connect to localhost:12312, send a greeting, and stream the server's
    response into ``Received_file`` until the connection is closed.

    (The ``__main__`` guard called ``main()`` while the obfuscated original
    named this function ``_snake_case`` — restored here.)
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(B'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                # Empty recv => peer closed the connection.
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')


# Backward-compatible alias for the obfuscated original name.
_snake_case = main

if __name__ == "__main__":
    main()
'''simple docstring'''
import math
def jump_search(arr, x):
    """Search sorted *arr* for *x* using jump search.

    Args:
        arr: a sequence sorted in ascending order.
        x: the value to look for.

    Returns:
        The index of *x* in *arr*, or -1 when absent (or when *arr* is empty).

    (The obfuscated original used one name for both parameters — a
    SyntaxError — and was called as ``jump_search`` below; also guards the
    empty-input case, which previously raised IndexError on ``arr[-1]``.)
    """
    if not arr:
        return -1
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump ahead in sqrt(n)-sized blocks until the block end reaches x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan inside the identified block.
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


# Backward-compatible alias for the obfuscated original name.
SCREAMING_SNAKE_CASE_ = jump_search

if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    x = int(input('''Enter the number to be searched:\n'''))
    res = jump_search(arr, x)
    if res == -1:
        print('''Number not found!''')
    else:
        print(F"Number {x} is at index {res}")
| 276 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
# The datasets.Metric class below is decorated with
# add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) and its MetricInfo
# reads _CITATION — the obfuscated original assigned all three strings to the
# same throwaway name, so those references raised NameError. Restore the names.
_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:
    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""

# Preserve the obfuscated original binding (last assignment wins).
lowercase__ = _KWARGS_DESCRIPTION
def simple_accuracy(preds, labels):
    """Fraction of positions where *preds* equals *labels* (numpy arrays)."""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    """Accuracy and F1 (with averaging mode *fa_avg*) as a dict.

    (The metric class below calls ``acc_and_fa(..., fa_avg='macro')``; the
    obfuscated original named all three helpers ``_snake_case`` with duplicate
    parameter names — a SyntaxError — so the real names are restored here.)
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def evaluate_multirc(ids_preds, labels):
    """MultiRC evaluation.

    Returns exact match (per question), per-question macro-F1 (``f1_m``)
    and answer-level F1 (``f1_a``).
    """
    # Group (prediction, label) pairs by their paragraph/question id.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average='macro')
        fas.append(fa)
        # Exact match: every answer of the question predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    em = float(sum(ems) / len(ems))
    fa_m = sum(fas) / len(fas)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred['prediction'] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}


# Preserve the obfuscated name (originally bound to the last definition).
_snake_case = evaluate_multirc
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
    """SuperGLUE metric: dispatches on ``self.config_name`` to the accuracy /
    F1 / Matthews-correlation / ReCoRD / MultiRC evaluators defined above.

    NOTE(review): machine-obfuscated — all three methods are named ``A_``
    (only the last binding survives), so the ``self._get_feature_types()``
    call below no longer resolves; the original names (presumably ``_info``,
    ``_get_feature_types``, ``_compute``) must be restored before use.
    """

    def A_ ( self ):
        # Metric info: validate the config name and declare the feature schema.
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )

    def A_ ( self ):
        # Feature schema per subset (record/multirc use structured inputs,
        # everything else is plain int64 labels).
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value('int64' ),
                        "query": datasets.Value('int64' ),
                    },
                    "prediction_text": datasets.Value('string' ),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value('int64' ),
                        "query": datasets.Value('int64' ),
                    },
                    "answers": datasets.Sequence(datasets.Value('string' ) ),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value('int64' ),
                        "paragraph": datasets.Value('int64' ),
                        "question": datasets.Value('int64' ),
                    },
                    "prediction": datasets.Value('int64' ),
                },
                "references": datasets.Value('int64' ),
            }
        else:
            return {
                "predictions": datasets.Value('int64' ),
                "references": datasets.Value('int64' ),
            }

    def A_ ( self , lowercase , lowercase ):
        # Compute the subset-specific metric for (predictions, references).
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )}
        elif self.config_name == "cb":
            return acc_and_fa(lowercase , lowercase , fa_avg='macro' )
        elif self.config_name == "record":
            # Re-shape into the SQuAD-like structure record_evaluation expects.
            _lowerCamelCase : List[str] = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            _lowerCamelCase : Union[str, Any] = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(lowercase , lowercase )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(lowercase , lowercase )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(lowercase , lowercase )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return ``sum(2 * a * ((a - 1) // 2))`` for ``a`` in ``[3, n]``.

    (The obfuscated original read the undefined name ``n`` while its
    parameter had a different name, and the ``__main__`` guard called the
    then-undefined ``solution`` — both restored here. The wrong ``-> str``
    annotation is also corrected: the sum is an int.)
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


# Backward-compatible alias for the obfuscated original name.
__magic_name__ = solution

if __name__ == "__main__":
    print(solution())
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/cuDNN ops deterministic so the pixel-value comparisons below are reproducible.
enable_full_determinism()
class lowerCAmelCase__ ( lowercase, unittest.TestCase ):
    """Fast CPU tests for DDIMPipeline built from a tiny UNet.

    NOTE(review): machine-obfuscated — the mixin base and many argument
    references appear as the undefined name ``lowercase``, the class
    attributes all rebind ``lowerCamelCase__``, and every method is named
    ``A_`` (only the last binding survives). Comments describe intent only;
    original identifiers must be restored before these tests can run.
    """

    lowerCamelCase__ = DDIMPipeline
    lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {
        """num_images_per_prompt""",
        """latents""",
        """callback""",
        """callback_steps""",
    }
    lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    lowerCamelCase__ = False

    def A_ ( self ):
        # Build tiny deterministic components (seeded UNet + DDIM scheduler).
        torch.manual_seed(0 )
        _lowerCamelCase : List[Any] = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        _lowerCamelCase : List[str] = DDIMScheduler()
        _lowerCamelCase : Optional[int] = {'unet': unet, 'scheduler': scheduler}
        return components

    def A_ ( self , lowercase , lowercase=0 ):
        # Seeded pipeline inputs; MPS devices need a CPU-side generator.
        if str(lowercase ).startswith('mps' ):
            _lowerCamelCase : Dict = torch.manual_seed(lowercase )
        else:
            _lowerCamelCase : List[str] = torch.Generator(device=lowercase ).manual_seed(lowercase )
        _lowerCamelCase : Tuple = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def A_ ( self ):
        # End-to-end 2-step inference on CPU; compare a 3x3 corner slice
        # against hard-coded reference values.
        _lowerCamelCase : Any = 'cpu'
        _lowerCamelCase : Tuple = self.get_dummy_components()
        _lowerCamelCase : Optional[Any] = self.pipeline_class(**lowercase )
        pipe.to(lowercase )
        pipe.set_progress_bar_config(disable=lowercase )
        _lowerCamelCase : str = self.get_dummy_inputs(lowercase )
        _lowerCamelCase : int = pipe(**lowercase ).images
        _lowerCamelCase : Any = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        _lowerCamelCase : Tuple = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
        _lowerCamelCase : str = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(lowercase , 1E-3 )

    def A_ ( self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    def A_ ( self ):
        super().test_save_load_local(expected_max_difference=3E-3 )

    def A_ ( self ):
        super().test_save_load_optional_components(expected_max_difference=3E-3 )

    def A_ ( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests for DDIM/DDPM checkpoints.

    NOTE(review): machine-obfuscated — ``lowercase`` references below are
    undefined and both tests share the method name ``A_`` (only the last
    survives); original identifiers must be restored before running.
    """

    def A_ ( self ):
        # CIFAR-10 DDIM sampling: compare a 3x3 corner slice with references.
        _lowerCamelCase : Optional[Any] = 'google/ddpm-cifar10-32'
        _lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(lowercase )
        _lowerCamelCase : Dict = DDIMScheduler()
        _lowerCamelCase : Dict = DDIMPipeline(unet=lowercase , scheduler=lowercase )
        ddim.to(lowercase )
        ddim.set_progress_bar_config(disable=lowercase )
        _lowerCamelCase : List[str] = torch.manual_seed(0 )
        _lowerCamelCase : str = ddim(generator=lowercase , eta=0.0 , output_type='numpy' ).images
        _lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _lowerCamelCase : List[Any] = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def A_ ( self ):
        # LSUN bedrooms EMA checkpoint at 256x256 resolution.
        _lowerCamelCase : Optional[int] = 'google/ddpm-ema-bedroom-256'
        _lowerCamelCase : str = UNetaDModel.from_pretrained(lowercase )
        _lowerCamelCase : str = DDIMScheduler.from_pretrained(lowercase )
        _lowerCamelCase : Optional[int] = DDIMPipeline(unet=lowercase , scheduler=lowercase )
        ddpm.to(lowercase )
        ddpm.set_progress_bar_config(disable=lowercase )
        _lowerCamelCase : Tuple = torch.manual_seed(0 )
        _lowerCamelCase : int = ddpm(generator=lowercase , output_type='numpy' ).images
        _lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        _lowerCamelCase : str = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    """Builds tiny AST configs and dummy inputs for the test class below.

    NOTE(review): reconstructed from an obfuscated original in which all
    ``__init__`` parameters shared one name (a SyntaxError), every
    ``self.*`` assignment was rewritten to a throwaway local (so no instance
    attribute was ever set), and all four methods shared the name ``A__``.
    The names below match what the sibling test class actually calls:
    ``ASTModelTester``, ``prepare_config_and_inputs``, ``get_config``,
    ``create_and_check_model``, ``prepare_config_and_inputs_for_common``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Random spectrogram inputs (plus labels when use_labels) and a config."""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        """Tiny ASTConfig mirroring this tester's hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,  # NOTE(review): value obfuscated in original — confirm
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        """Forward a base ASTModel and check the hidden-state shape."""
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Return the (config, inputs_dict) pair for the common test mixin."""
        config, input_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict


# Preserve the obfuscated original name (rebound by later class defs anyway).
a_ = ASTModelTester
@require_torch
class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
    """Model/pipeline test-suite class for AST.

    NOTE(review): machine-obfuscated — the two mixin bases appear as the
    undefined name ``lowerCamelCase`` (presumably ModelTesterMixin and
    PipelineTesterMixin from the imports above — confirm), the class
    attributes all rebind ``lowercase`` (shadowing each other), several defs
    repeat the parameter name ``_SCREAMING_SNAKE_CASE`` (a SyntaxError), and
    every method is named ``A__``. Original identifiers must be restored
    before these tests can run; docstrings below describe intent only.
    """

    lowercase = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    lowercase = (
        {"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
        if is_torch_available()
        else {}
    )
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        """Skip the audio-classification pipeline test case for this model."""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def A__ ( self ) -> Optional[int]:
        """setUp: build the model tester and the config tester."""
        UpperCamelCase = ASTModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )

    def A__ ( self ) -> Any:
        """Run the common configuration tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""AST does not use inputs_embeds""" )
    def A__ ( self ) -> Optional[Any]:
        """Skipped: AST has no inputs_embeds."""
        pass

    def A__ ( self ) -> int:
        """Input embeddings are an nn.Module; output embeddings are Linear or None."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            UpperCamelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )

    def A__ ( self ) -> Union[str, Any]:
        """forward() signature must start with `input_values`."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
            UpperCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = ['input_values']
            self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )

    def A__ ( self ) -> Dict:
        """Build and check the base model via the model tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )

    @slow
    def A__ ( self ) -> List[Any]:
        """Load a pretrained AST checkpoint end-to-end."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = ASTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_audio():
    """Download a sample flac clip from the Hub and load it with torchaudio.

    Renamed from the obfuscated `lowercase__`: the integration test calls
    `prepare_audio()`.  Returns ``(audio_tensor, sampling_rate)``.
    """
    filepath = hf_hub_download(
        repo_id="""nielsr/audio-spectogram-transformer-checkpoint""", filename="""sample_audio.flac""", repo_type="""dataset""" )
    # torchaudio.load returns (waveform, sample_rate); the original passed the
    # function object itself instead of the downloaded path and returned
    # undefined names.
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
    """Slow integration tests running the pretrained AudioSet checkpoint on a real clip."""

    @cached_property
    def default_feature_extractor(self):
        # Referenced below as `self.default_feature_extractor`; the original
        # def was obfuscated to `A__`.
        return (
            ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        """End-to-end forward pass; checks logits shape and a slice of values."""
        feature_extractor = self.default_feature_extractor
        # NOTE(review): assumes `torch_device` is imported at module level — confirm.
        model = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(torch_device)
        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="""pt""" ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# --- unrelated snippet boundary (stray dataset artifact) ---
"""simple docstring"""
# Imports
import numpy as np
class lowerCAmelCase__ :
    """Compute multispectral vegetation indices (NDVI, EVI, GEMI, ...).

    Band matrices (numpy arrays or scalars) are stored as attributes
    ``red``, ``green``, ``blue``, ``redEdge`` and ``nir``.  The original
    obfuscation assigned every band to a throwaway local (so ``self.red`` etc.
    were never set), used duplicate keyword names (a SyntaxError) and renamed
    all 40 index methods to ``A_`` while the dispatch table still referenced
    the real names — all restored here from the dispatch table itself.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Overwrite only the bands that were actually supplied."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch an index computation by name; returns False for unknown names."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            'ARVI2': self.arvaa,
            'CCCI': self.ccci,
            'CVI': self.cvi,
            'GLI': self.gli,
            'NDVI': self.ndvi,
            'BNDVI': self.bndvi,
            'redEdgeNDVI': self.red_edge_ndvi,
            'GNDVI': self.gndvi,
            'GBNDVI': self.gbndvi,
            'GRNDVI': self.grndvi,
            'RBNDVI': self.rbndvi,
            'PNDVI': self.pndvi,
            'ATSAVI': self.atsavi,
            'BWDRVI': self.bwdrvi,
            'CIgreen': self.ci_green,
            'CIrededge': self.ci_rededge,
            'CI': self.ci,
            'CTVI': self.ctvi,
            'GDVI': self.gdvi,
            'EVI': self.evi,
            'GEMI': self.gemi,
            'GOSAVI': self.gosavi,
            'GSAVI': self.gsavi,
            'Hue': self.hue,
            'IVI': self.ivi,
            'IPVI': self.ipvi,
            'I': self.i,
            'RVI': self.rvi,
            'MRVI': self.mrvi,
            'MSAVI': self.m_savi,
            'NormG': self.norm_g,
            'NormNIR': self.norm_nir,
            'NormR': self.norm_r,
            'NGRDI': self.ngrdi,
            'RI': self.ri,
            'S': self.s,
            'IF': self._if,
            'DVI': self.dvi,
            'TVI': self.tvi,
            'NDRE': self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print('Index not in the list!' )
            return False

    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
# Module-level logger for this file.  NOTE(review): the name was obfuscated;
# HF files conventionally call this `logger` — confirm how later code refers to it.
_a = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute the (height, width) an image should be resized to.

    Renamed from the obfuscated `_A`: the processor's `resize` method calls
    `get_resize_output_image_size(...)` with these keyword names.  Each output
    side is rounded to the nearest `multiple`; with `keep_aspect_ratio` the
    smaller relative scale change is applied to both sides.
    """
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then push back inside [min_val, max_val].
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class _lowerCAmelCase ( lowercase ):
    """DPT-style image processor: resize (optionally keeping aspect ratio and
    rounding each side to a multiple), rescale pixel values and normalize.

    The original obfuscation gave every method the same name (`_lowercase`,
    so later defs shadowed earlier ones), used duplicate parameter names
    (a SyntaxError) and referenced names the parameters no longer had —
    method and parameter names are restored here.
    """

    # NOTE(review): attribute name was obfuscated; `model_input_names` is the
    # BaseImageProcessor convention — confirm against the base class.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        do_rescale=True,
        rescale_factor=1 / 2_5_5,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, keep_aspect_ratio=False, ensure_multiple_of=1, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize one image; each output side is a multiple of `ensure_multiple_of`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        # Calls the module-level `resize` from image_transforms (not this method).
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (module-level `rescale`)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize with per-channel `mean` / `std` (module-level `normalize`)."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        keep_aspect_ratio=None,
        ensure_multiple_of=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Full pipeline (resize -> rescale -> normalize) over one image or a batch."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # NOTE(review): kept from the original; `and` binds tighter than `or`,
        # so this also fires whenever resample is None — confirm intent.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Turn model logits into per-image segmentation maps, optionally resized to `target_sizes`."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
# --- unrelated snippet boundary (stray dataset artifact) ---
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder ( CLIPPreTrainedModel ):
    """CLIP-vision encoder + mapper producing conditioning embeddings for Paint-by-Example.

    NOTE(review): class and base names were obfuscated (`lowerCAmelCase__(lowercase)`);
    restored to the diffusers originals — CLIPPreTrainedModel is imported above.
    The original also never set any instance attributes (assignments went to a
    throwaway local) and the forward method was not named `forward`, so the
    nn.Module call protocol could never dispatch to it.
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode `pixel_values`; optionally also return the learned unconditional vector."""
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper ( nn.Module ):
    """Small stack of transformer blocks applied to the pooled CLIP states.

    Renamed from the obfuscated `lowerCAmelCase__`: the encoder above
    instantiates `PaintByExampleMapper(config)`.
    """

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                # NOTE(review): the three positional args were obfuscated away;
                # upstream diffusers passes (dim, num_heads, dim // num_heads) —
                # confirm the third argument against BasicTransformerBlock's signature.
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ] )

    def forward(self, hidden_states):
        """Run the blocks sequentially over `hidden_states`."""
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """Singly-linked node holding `data` and a `next` pointer.

    Renamed from the obfuscated `UpperCAmelCase__` (which was immediately
    shadowed by the list class of the same name): `insert_nth` below calls
    `Node(...)`.  The original also assigned both fields to a throwaway local.
    """

    def __init__(self, data):
        self.data = data
        self.next = None
class CircularLinkedList:
    """Singly linked circular list: ``tail.next`` always points back at ``head``.

    Renamed from the obfuscated `UpperCAmelCase__`; method names are restored
    from the calls made by the test function below (insert_tail, insert_head,
    insert_nth, delete_front, delete_tail, delete_nth, is_empty).  The original
    also assigned every attribute to a throwaway local and duplicated
    parameter names (a SyntaxError).
    """

    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self):
        """Yield each node's data exactly once, starting at the head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        """Insert `data` at position `index` (0..len), keeping the list circular."""
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.head = self.tail = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self):
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index=0):
        """Remove and return the data at position `index` (0..len-1)."""
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self):
        return len(self) == 0
def _UpperCAmelCase():
    """Exercise CircularLinkedList: empty-list errors, inserts, deletes, repr."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
lowercase__ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
lowercase__ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : List[Any] = from_type.lower().strip('s' )
_lowerCamelCase : List[Any] = to_type.lower().strip('s' )
_lowerCamelCase : Optional[int] = UNIT_SYMBOL.get(lowercase__ , lowercase__ )
_lowerCamelCase : Any = UNIT_SYMBOL.get(lowercase__ , lowercase__ )
if from_sanitized not in METRIC_CONVERSION:
_lowerCamelCase : Tuple = (
f'''Invalid \'from_type\' value: {from_type!r}.\n'''
f'''Conversion abbreviations are: {', '.join(lowercase__ )}'''
)
raise ValueError(lowercase__ )
if to_sanitized not in METRIC_CONVERSION:
_lowerCamelCase : Any = (
f'''Invalid \'to_type\' value: {to_type!r}.\n'''
f'''Conversion abbreviations are: {', '.join(lowercase__ )}'''
)
raise ValueError(lowercase__ )
_lowerCamelCase : List[Any] = METRIC_CONVERSION[from_sanitized]
_lowerCamelCase : int = METRIC_CONVERSION[to_sanitized]
_lowerCamelCase : List[str] = 1
if from_exponent > to_exponent:
_lowerCamelCase : List[str] = from_exponent - to_exponent
else:
_lowerCamelCase : List[Any] = -(to_exponent - from_exponent)
return value * pow(10 , lowercase__ )
if __name__ == "__main__":
from doctest import testmod
testmod() | 96 | 0 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Track peak RSS of the current process on a background thread.

    Renamed from the obfuscated `a`: the module-level statement below
    instantiates `PeakCPUMemory()`.  The original assigned every attribute to
    a throwaway local, so `self.process` / `self.peak_monitoring` were never set.
    """

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        """Busy-loop recording the max RSS until `stop()` flips the flag."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop monitoring and return the observed peak RSS in bytes."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
# Shared tracker instance; the measure functions below reference
# `cpu_peak_tracker`, which the obfuscation had renamed away.
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    """Snapshot time, CPU RSS and per-GPU allocated memory; start peak tracking.

    Renamed from the obfuscated `lowerCamelCase__` (three distinct functions
    shared that name and shadowed each other) — upstream accelerate calls this
    `start_measure`; confirm against external callers.
    """
    # Time
    measures = {'time': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    """Return deltas (time in s, memory in MiB) relative to `start_measures`.

    Renamed from the obfuscated `lowerCamelCase__` — upstream accelerate calls
    this `end_measure`; confirm against external callers.
    """
    # Time
    measures = {'time': time.time() - start_measures['time']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
    measures['cpu-peak'] = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f'{i}-peak'] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def log_measures(measures, description):
    """Pretty-print a measures dict produced by `end_measure`.

    Renamed from the obfuscated `lowerCamelCase__` — upstream accelerate calls
    this `log_measures`; confirm against external callers.
    """
    print(f'''{description}:''')
    print(f'''- Time: {measures["time"]:.2f}s''')
    for i in range(torch.cuda.device_count()):
        print(f'''- GPU {i} allocated: {measures[str(i)]:.2f}MiB''')
        peak = measures[f'''{i}-peak''']
        print(f'''- GPU {i} peak: {peak:.2f}MiB''')
    print(f'''- CPU RAM allocated: {measures["cpu"]:.2f}MiB''')
    print(f'''- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB''')
# --- unrelated snippet boundary (stray dataset artifact) ---
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module scaffolding (standard transformers __init__.py layout).  The
# obfuscation had assigned both the structure dict and the _LazyModule to a
# throwaway name, so `_import_structure` (used by _LazyModule below) was
# undefined and the lazy module was never installed in sys.modules.
_import_structure = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_vit_msn"""] = [
        """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ViTMSNModel""",
        """ViTMSNForImageClassification""",
        """ViTMSNPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import Any
def lowercase_(input_list):
    """Return the sorted mode(s) of `input_list`; [] for empty input.

    The original parameter was obfuscated to `A` (typed Dict) while the body
    still referenced `input_list` and an undefined `y` — restored here.
    """
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # highest frequency in the input
    # All values whose count equals the maximum, deduplicated and sorted.
    return sorted({input_list[i] for i, value in enumerate(counts) if value == max_count})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# --- unrelated snippet boundary (stray dataset artifact) ---
"""simple docstring"""
def method_a(boundary, steps):
    """Extended trapezoidal rule for f over [boundary[0], boundary[1]].

    All four defs in this snippet were obfuscated to `_snake_case` (so they
    shadowed each other) while the bodies still called `make_points`, `f`,
    `method_a` and `main` — names restored from those call sites.
    """
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ... strictly below b-h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'''y = {y}''')


if __name__ == "__main__":
    main()
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
# Type aliases for the distance functions below.  The obfuscation assigned
# both to the same name (the second silently shadowed the first); restored to
# the upstream names — confirm no external code imported `lowercase__`.
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
# NOTE(review): the mangled `np.floataa` is assumed to be np.float64 — confirm.
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1, vector_2):
    """NumPy Euclidean distance between two equal-length vectors.

    Renamed from the obfuscated `__a` (duplicate parameter names were a
    SyntaxError); the benchmark below invokes `euclidean_distance` via timeit.
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1, vector_2):
    """Pure-Python Euclidean distance between two equal-length vectors.

    Renamed from the obfuscated `__a`; the benchmark below invokes
    `euclidean_distance_no_np` via timeit.  The obfuscated body computed
    ``(va - va) ** 2`` — identically zero — instead of ``(va - vb) ** 2``.
    """
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
def __a ( ) ->Tuple:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
# --- unrelated snippet boundary (stray dataset artifact) ---
"""simple docstring"""
import math
def _snake_case ( lowercase__ ):
return math.sqrt(lowercase__ ) * math.sqrt(lowercase__ ) == num
def _snake_case ( lowercase__ ):
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[Any] = n
while left <= right:
_lowerCamelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_lowerCamelCase : str = mid - 1
else:
_lowerCamelCase : Optional[int] = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 | 0 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowerCAmelCase_ ( __UpperCAmelCase: Tuple , __UpperCAmelCase: List[Any] , __UpperCAmelCase: Dict=[] ) -> Optional[int]:
UpperCamelCase__ : Dict = size[0] - overlap_pixels * 2
UpperCamelCase__ : int = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
UpperCamelCase__ : Any = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
UpperCamelCase__ : List[str] = np.pad(lowercase__ , mode='''linear_ramp''' , pad_width=lowercase__ , end_values=0 )
if "l" in remove_borders:
UpperCamelCase__ : Tuple = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
UpperCamelCase__ : Any = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
UpperCamelCase__ : Optional[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
UpperCamelCase__ : Union[str, Any] = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def clamp(value, minimum, maximum):
    """Clamp `value` into [minimum, maximum].

    Renamed from the obfuscated `lowerCAmelCase_`: `clamp_rect` below calls
    `clamp(...)` (the duplicate parameter names were also a SyntaxError).
    """
    return max(minimum, min(value, maximum))


def clamp_rect(rect, min_corner, max_corner):
    """Clamp an (x0, y0, x1, y1) rect into the box [min_corner, max_corner]."""
    return (
        clamp(rect[0], min_corner[0], max_corner[0]),
        clamp(rect[1], min_corner[1], max_corner[1]),
        clamp(rect[2], min_corner[0], max_corner[0]),
        clamp(rect[3], min_corner[1], max_corner[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    """Grow an (x1, y1, x2, y2) rectangle by ``overlap`` pixels on every side.

    The result is clamped to the image bounds, where ``image_size`` is
    (width, height). Returns a 4-tuple.
    """

    def _clamp(value, smallest, largest):
        return max(smallest, min(value, largest))

    grown = [rect[0] - overlap, rect[1] - overlap, rect[2] + overlap, rect[3] + overlap]
    return (
        _clamp(grown[0], 0, image_size[0]),
        _clamp(grown[1], 0, image_size[1]),
        _clamp(grown[2], 0, image_size[0]),
        _clamp(grown[3], 0, image_size[1]),
    )
def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend a context strip from the original image to the left of ``tile``.

    A vertical slice of width ``original_slice`` is cut from the (resized)
    original image at horizontal offset ``slice_x`` and pasted at x=0; the
    tile itself is pasted right of it. Returns a new RGB image of width
    ``tile.width + original_slice``.
    """
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    # Context strip, taken from the original image scaled to the tile's size.
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    """Drop the (4x-upscaled) context strip that ``squeeze_tile`` prepended.

    The strip was ``original_image_slice`` px wide before the 4x upscale, so
    ``original_image_slice * 4`` px are cropped off the left edge.
    """
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def lowerCAmelCase_(n, d):
    """Round ``n`` down to the nearest multiple of ``d``."""
    divisor = n % d  # remainder to strip off
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """Tile-based wrapper around ``StableDiffusionUpscalePipeline``.

    Splits a large input image into overlapping tiles, 4x-upscales each tile
    with the parent pipeline, and blends the results back together using
    linear-ramp transparency masks so tile seams are invisible.
    """

    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale the (x, y) tile of ``image`` and paste it into ``final_image``.

        Extra keyword arguments are forwarded to the parent pipeline's
        ``__call__`` (prompt, steps, guidance, ...).
        """
        torch.manual_seed(0)  # deterministic diffusion noise per tile
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Horizontal centre of the crop, mapped into tile coordinates, minus
        # half the context-strip width; never negative.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        # Tiles on the image boundary keep full opacity on the outer edges.
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        """Upscale ``image`` 4x, tile by tile.

        Args:
            prompt: text prompt forwarded to the upscale pipeline.
            image: PIL image to upscale.
            callback: optional callable receiving
                ``{"progress": float, "image": PIL.Image}`` after each tile.
            tile_size / tile_border / original_image_slice: tiling geometry
                in source-image pixels.

        Returns:
            The assembled 4x-upscaled PIL image.
        """
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)  # tile count along x
        tcy = math.ceil(image.size[1] / tile_size)  # tile count along y
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    """Run a demo: 4x-upscale the diffusers logo with the tiled pipeline.

    Requires a CUDA device and network access to download the
    ``stabilityai/stable-diffusion-x4-upscaler`` checkpoint.
    """
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        # Report progress and keep a rolling snapshot of the partial result.
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
    # Script entry point. NOTE(review): `main` must be defined at module level;
    # the tiled-upscaling demo function above appears to be the intended target.
    main()
| 201 |
"""simple docstring"""
import functools
from typing import Any
def _snake_case(string, words):
    """Return True if ``string`` can be segmented into a sequence of ``words``.

    Builds a trie over ``words`` and runs a memoized depth-first search over
    string indices (the classic word-break dynamic program).

    Raises:
        ValueError: if ``string`` is not a non-empty str, or ``words`` is not
            a list of non-empty strings.
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError('the string should be not empty string')

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError('the words should be a list of non-empty strings')

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = 'WORD_KEEPER'  # sentinel marking end-of-word nodes

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        # Reaching the end means the whole string was segmented.
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


# Public alias: the underscore-prefixed name is not reachable via `import *`.
word_break = _snake_case
if __name__ == "__main__":
    # Self-test: run any doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
import mpmath # for roots of unity
import numpy as np
class __magic_name__:
    """Polynomial multiplication via the iterative radix-2 FFT.

    ``product`` holds the coefficients of polyA * polyB, computed by a
    forward DFT of both inputs, point-wise multiplication, and an inverse
    DFT. Coefficients are rounded to 8 decimal places.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists (index i holds the coefficient of x^i).
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients.
        # NOTE(review): an all-zero polynomial raises IndexError here
        # (pre-existing behavior, kept as-is).
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Pad both polynomials to the next power of two that can hold the product.
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex primitive n-th root of unity used for the Fourier
        # transform (stdlib cmath replaces the third-party mpmath.root call;
        # both collapse to the same double-precision complex value).
        import cmath

        self.root = cmath.exp(2j * cmath.pi / self.c_max_length)

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Forward DFT of polyA (``which == 'A'``) or polyB (otherwise)."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case: a constant polynomial is its own DFT.
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root

            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root

            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Point-wise multiply the two DFTs, then apply the inverse DFT."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case: a single point needs no inverse transform.
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]

        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack: round away double-precision noise (8 decimal places).
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing zero coefficients.
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Human-readable dump of both inputs and their product."""
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Public alias: the dunder-style class name is not reachable via `import *`.
FFT = __magic_name__
# Unit tests
if __name__ == "__main__":
    # Self-test: run any doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 218 |
"""simple docstring"""
def _snake_case(series):
    """Return True if ``series`` is an arithmetic progression.

    Args:
        series: non-empty list of numbers.

    Raises:
        ValueError: if ``series`` is not a list, or is empty.
    """
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    # A single element is trivially arithmetic.
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    # Every consecutive pair must share the same difference.
    return all(series[index + 1] - series[index] == common_diff for index in range(len(series) - 1))


# Public alias: the underscore-prefixed name is not reachable via `import *`.
is_arithmetic_series = _snake_case
def _snake_case(series):
    """Return the arithmetic mean of ``series``.

    Args:
        series: non-empty list of numbers.

    Raises:
        ValueError: if ``series`` is not a list, or is empty.
    """
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    # sum() over the list replaces the original manual accumulation loop.
    return sum(series) / len(series)


# Public alias: the underscore-prefixed name is not reachable via `import *`.
arithmetic_mean = _snake_case
if __name__ == "__main__":
    # Self-test: run any doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Holds the default configuration used by the ConditionalDetr image-processor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # None sentinels replace mutable default arguments; the fallbacks are
        # the canonical processor defaults.
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge, aspect-preserving resize.

        For a batch, returns the max height and max width across the batch.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # assumes channels-first arrays/tensors: (C, H, W) — TODO confirm
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowercase(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``ConditionalDetrImageProcessor``.

    Combines the shared save/load checks from the mixin with
    processor-specific property, __call__, and COCO-annotation tests.
    """

    # Processor class under test; None when vision deps are missing so the
    # decorated tests are skipped instead of crashing at import time.
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        # Covered by the __call__ tests below.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger (transformers' logging wrapper imported above).
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return an OrderedDict with timm backbone keys mapped to the HF naming scheme.

    Keys containing ``backbone.0.body`` are rewritten to
    ``backbone.conv_encoder.model``; all other keys are copied unchanged.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split PyTorch's fused MultiHeadAttention in_proj weights into q/k/v.

    Mutates ``state_dict`` in place: pops each fused
    ``in_proj_weight``/``in_proj_bias`` tensor (rows 0:256 = query,
    256:512 = key, last 256 = value, i.e. hidden size 256) and stores the
    slices under the HF ``q_proj``/``k_proj``/``v_proj`` key names.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize a PIL image so its longer side matches the size the checkpoint expects.

    Detection checkpoints were trained at max size 800, structure-recognition
    checkpoints at 1000; the aspect ratio is preserved.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image
def normalize(image):
    """Convert a PIL image to a float tensor and normalize with ImageNet mean/std.

    Original code normalized the raw image instead of the tensor returned by
    ``F.to_tensor`` — fixed to feed the tensor through both steps.
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint into the HF format.

    Downloads the state dict, renames keys, verifies model outputs against
    reference logits/boxes, then optionally saves to disk and pushes to the hub.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an example document image
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
class lowercase:
    """Prefix-sum helper: O(n) construction, O(1) inclusive range-sum queries."""

    def __init__(self, array):
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        # prefix_sum[i] holds array[0] + ... + array[i]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        """Return the sum of the original array over the inclusive range [start, end]."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum_of_target(self, target_sum):
        """Return True iff some contiguous subarray sums exactly to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
    # Run the doctest examples embedded in this module.
    from doctest import testmod

    testmod()
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True iff *number* is a perfect square (number >= 0 assumed)."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions x + y + z and return the sum in lowest terms as (top, bottom)."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """Project Euler 180: sum of all unique s(x, y, z) for golden triples of the given order.

    For each candidate (x, y) pair, z is derived for the exponents n in
    {1, 2, -1, -2}; valid reduced fractions z with 0 < z < 1 and denominator
    <= order contribute x + y + z (in lowest terms) to the unique set.
    Returns numerator + denominator of the total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
    # Print the Project Euler result when executed directly.
    print(f"{solution() = }")
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True iff *number* is prime.

    Raises AssertionError for non-int or negative input; trial division by odd
    numbers up to sqrt(number).
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    """Return the first prime >= factor * value (searching downward if kwargs['desc'] is True).

    If a descending search wraps back to the starting value without finding a
    prime, the search restarts upward from value + 1.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        # step direction is controlled by the optional 'desc' keyword
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

        if value == first_value_val:
            return next_prime(value + 1, **kwargs)
    return value
'''simple docstring'''
def solution() -> int:
    """Project Euler 40: product of digits d1*d10*d100*...*d1000000 of the Champernowne constant."""
    constant = []
    i = 1

    # Appending 10**6 integers guarantees well over 10**6 digits after joining.
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
    # Print the answer when run as a script.
    print(solution())
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public symbols it exports; consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from PIL import Image
def change_brightness(img: "Image", level: float) -> "Image":
    """Return a copy of *img* with every band value shifted by *level*.

    Raises:
        ValueError: if level is outside [-255.0, 255.0].
    """

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) == c + level; kept in the original's midpoint form
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
    brigt_img.save("image_data/lena_brightness.png", format="png")
'''simple docstring'''
def merge_sort(collection: list) -> list:
    """Sort *collection* ascending by repeatedly extracting its min and max.

    Note: mutates the input list in place while building the result.
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    # Read a comma-separated list of ints from stdin, sort it, and print it back.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are never auto-closed or auto-commented.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]
def main():
    """Close stale GitHub issues and leave a staleness warning on inactive ones.

    Requires a GITHUB_TOKEN environment variable; operates on huggingface/transformers.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # newest comment first
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # the bot already warned and a week passed with no activity -> close
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # first warning before closing
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
    # Entry point for the scheduled stale-issue maintenance job.
    main()
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: "list[float]") -> "np.ndarray":
    """Element-wise rectified linear unit: max(0, x) for each entry of *vector*."""
    return np.maximum(0, vector)
if __name__ == "__main__":
    # Smoke test: negatives clamp to 0, non-negatives pass through.
    print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # FileLock guards against concurrent workers downloading punkt simultaneously.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def a(x: str) -> str:
    """Split *x* into one sentence per line using nltk's Punkt tokenizer.

    Also strips pegasus '<n>' newline markers; the original discarded the
    re.sub result, which made the substitution a no-op (bug fixed here).
    """
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule name -> public symbols; consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a WMT translation dataset and write each split to .source/.target text files.

    Args:
        src_lang / tgt_lang: language pair, e.g. "ro"-"en".
        dataset: datasets-hub dataset name.
        save_dir: output folder; defaults to "<dataset>-<pair>".
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        # context managers close the files (the original leaked both handles);
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        with src_path.open("w+") as src_fp, tgt_path.open("w+") as tgt_fp:
            for x in tqdm(ds[split]):
                ex = x["translation"]
                src_fp.write(ex[src_lang] + "\n")
                tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
    # Expose the downloader as a CLI via python-fire.
    fire.Fire(download_wmt_dataset)
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register transformers' shared command-line options."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: write detailed report files at session end when --make-reports is set."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
'''simple docstring'''
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


# Submodule name -> public symbols; consumed lazily by _LazyModule below.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
class lowercase:
    """Union-by-rank disjoint set that also tracks per-set element counts.

    max_set always holds the size of the largest set seen so far.
    """

    def __init__(self, set_counts):
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Union the sets containing src and dst; return False if already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # attach the shallower (src) tree under dst
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Find the representative of disj_set with path compression."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
'''simple docstring'''
def a(string1: str, string2: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    Raises:
        ValueError: if the strings differ in length.
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    return sum(char1 != char2 for char1, char2 in zip(string1, string2))
if __name__ == "__main__":
    # Execute any doctest examples in this module.
    from doctest import testmod

    testmod()
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Return {'started_at', 'completed_at', 'duration'} (minutes) for one GitHub Actions job dict."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Return {job name: timing info} for every job of a GitHub Actions workflow run.

    Pages through the REST API 100 jobs at a time; returns {} on any error.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    # longest-running jobs first
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
    # Interactive driver: keep printing Catalan sequences until a negative limit is entered.
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

import doctest

doctest.testmod()
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """Pearson correlation metric backed by scipy.stats.pearsonr."""

    def _info(self):
        # Declare the metric's input schema and reference documentation.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # scipy returns (correlation, p-value); only expose the p-value on request.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
    """Assert that *dataset* is a 4x3 Dataset with columns col_1..col_3 and the expected dtypes."""
    # NOTE(review): both parameters are named ``__a`` — a SyntaxError as
    # written; the body's ``dataset`` / ``expected_features`` are the lost
    # original names (mechanical-renaming artifact).
    assert isinstance(__a , __a )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
    """Read a JSONL file into a Dataset with/without keep_in_memory; Arrow memory should only grow in-memory."""
    # NOTE(review): all three parameters are named ``__a`` — SyntaxError as
    # written; ``tmp_path`` / ``keep_in_memory`` below are the lost fixture
    # names (renaming artifact).
    UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
    _check_json_dataset(__a , __a )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] , )
def a ( __a , __a , __a ) -> Any:
    """Read a JSONL file with an explicit ``features`` override (or None) and verify the dtypes."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written
    # (renaming artifact); ``tmp_path`` / ``features`` /
    # ``default_expected_features`` are the lost original names.
    UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
    UpperCamelCase__ :Tuple = (
        Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
    )
    UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
    _check_json_dataset(__a , __a )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
    ] , )
def a ( __a , __a , __a ) -> Tuple:
    """Reading with reordered ``features`` must preserve the given column order (col_3, col_1, col_2)."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written
    # (renaming artifact); ``dataset`` below is never bound (the read result
    # went to ``UpperCamelCase__``).
    UpperCamelCase__ :int = tmp_path / '''cache'''
    UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
    UpperCamelCase__ :Any = features.copy() if features else default_expected_features
    UpperCamelCase__ :Union[str, Any] = (
        Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
    )
    UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
    assert isinstance(__a , __a )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
    """Reading with ``features`` ordered col_2, col_3, col_1 must keep that column order."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written
    # (renaming artifact); ``features`` / ``tmp_path`` / ``dataset`` are the
    # lost original names.
    UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
    UpperCamelCase__ :int = features.copy()
    UpperCamelCase__ :List[Any] = (
        Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
    )
    UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
    UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
    assert isinstance(__a , __a )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
    """Reading with an explicit ``split`` must tag the Dataset with it (default: ``train``)."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written
    # (renaming artifact); ``dataset`` / ``split`` below are the lost names.
    UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
    UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
    _check_json_dataset(__a , __a )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
    """JsonDatasetReader must accept either a single path (str) or a list of paths."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written
    # (renaming artifact); ``jsonl_path`` / ``tmp_path`` are the lost fixtures.
    if issubclass(__a , __a ):
        UpperCamelCase__ :Union[str, Any] = jsonl_path
    elif issubclass(__a , __a ):
        UpperCamelCase__ :int = [jsonl_path]
    UpperCamelCase__ :Dict = tmp_path / '''cache'''
    UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
    _check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
    """Assert every named split of *dataset_dict* is a 4x3 dataset with columns col_1..col_3 and expected dtypes."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written
    # (renaming artifact); ``dataset_dict`` / ``splits`` /
    # ``expected_features`` are the lost original names.
    assert isinstance(__a , __a )
    for split in splits:
        UpperCamelCase__ :Optional[int] = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
    """Read a {'train': path} mapping into a DatasetDict with/without keep_in_memory."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written
    # (renaming artifact).
    UpperCamelCase__ :List[str] = tmp_path / '''cache'''
    UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
    _check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] , )
def a ( __a , __a , __a ) -> int:
    """Read a {'train': path} mapping with a ``features`` override and verify dtypes."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written
    # (renaming artifact).
    UpperCamelCase__ :Tuple = tmp_path / '''cache'''
    UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
    UpperCamelCase__ :str = (
        Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
    )
    UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
    _check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
    """A split->path mapping must produce a DatasetDict whose datasets carry their split names."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written
    # (renaming artifact); ``split`` / ``jsonl_path`` / ``path`` are the lost
    # names (both branches bind to ``UpperCamelCase__``, not ``path``).
    if split:
        UpperCamelCase__ :List[str] = {split: jsonl_path}
    else:
        UpperCamelCase__ :int = '''train'''
        UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
    UpperCamelCase__ :Any = tmp_path / '''cache'''
    UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
    _check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
    """Deserialize one JSON document from the open file-like object *__a*."""
    # Delegate straight to the stdlib decoder; no post-processing is done.
    parsed_document = json.load(__a )
    return parsed_document
def a ( __a ) -> list:
    """Parse JSON Lines: decode each line of the iterable *__a* into an object.

    Fix: the original comprehension iterated the undefined name ``buffer`` and
    decoded the whole argument ``__a`` once per line instead of decoding each
    line; the ``-> int`` annotation was also wrong for a list-returning
    function.
    """
    return [json.loads(line ) for line in __a]
class lowercase :
    """Tests for ``JsonDatasetWriter``: JSON / JSON-Lines export, pandas-style
    ``orient`` modes, multiprocess export, and on-disk compression.

    NOTE(review): all six test methods share the obfuscated name
    ``lowerCAmelCase__`` (in a real module each later ``def`` would shadow the
    previous one), most signatures repeat ``UpperCamelCase_`` — a SyntaxError
    (duplicate argument) — and results are bound to ``UpperCamelCase__`` while
    later lines read the lost original names (``exported_content`` etc.).
    All of this is mechanical-renaming damage; confirm against upstream.
    """

    @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Round-trip a 10-row dataset through the writer into an in-memory buffer."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
            buffer.seek(0 )
            UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        assert isinstance(exported_content[0] , UpperCamelCase_ )
        assert len(UpperCamelCase_ ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''' , [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ] , )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Export with a pandas-style ``orient`` and check the container type, keys and lengths."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
            buffer.seek(0 )
            UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase_ ) == 10

    @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Same round-trip, but exercising the num_proc=2 multiprocess code path."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
            buffer.seek(0 )
            UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        assert isinstance(exported_content[0] , UpperCamelCase_ )
        assert len(UpperCamelCase_ ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''' , [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ] , )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """``orient`` export with num_proc=2; same shape checks as the single-process variant."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
            buffer.seek(0 )
            UpperCamelCase__ :int = load_json(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase_ ) == 10

    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        """Constructing the writer with num_proc=0 must raise (invalid worker count)."""
        with pytest.raises(UpperCamelCase_ ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )

    @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Write with each compression codec and compare the decompressed bytes to a reference file."""
        UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
        UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
        JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
        # fsspec's compression='infer' decompresses based on the extension.
        with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
            UpperCamelCase__ :Dict = f.read()
        with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
            UpperCamelCase__ :int = f.read()
        assert exported_content == original_content
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__snake_case = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
__snake_case = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
__snake_case = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def a ( __a , __a ) -> List[str]:
    """Fraction of predictions equal to labels, as a Python float."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written;
    # the body reads the lost original names ``preds`` / ``labels``
    # (renaming artifact; the ``.mean()`` call implies array-like inputs).
    return float((preds == labels).mean() )
def a ( __a , __a ) -> List[Any]:
    """Accuracy plus binary F1 (via ``sklearn``'s f1_score) for preds/labels."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written;
    # ``acc`` / ``fa`` in the returned dict are never bound (the results were
    # assigned to ``UpperCamelCase__``) — renaming artifacts.
    UpperCamelCase__ :Dict = simple_accuracy(__a , __a )
    UpperCamelCase__ :List[str] = float(fa_score(y_true=__a , y_pred=__a ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def a ( __a , __a ) -> Union[str, Any]:
    """Pearson and Spearman correlation coefficients (scipy.stats) as floats."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written;
    # ``pearson_corr`` / ``spearman_corr`` in the returned dict are never
    # bound (results went to ``UpperCamelCase__``) — renaming artifacts.
    UpperCamelCase__ :int = float(pearsonr(__a , __a )[0] )
    UpperCamelCase__ :List[str] = float(spearmanr(__a , __a )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    """GLUE metric: dispatches per config name to Matthews correlation (cola),
    Pearson/Spearman (stsb), accuracy+F1 (mrpc/qqp) or plain accuracy."""

    def lowerCAmelCase__ ( self ):
        """Validate ``self.config_name`` and build the MetricInfo (int64
        features, or float32 for the regression task ``stsb``)."""
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
                    '''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
                } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )

    # NOTE(review): duplicate ``UpperCamelCase_`` parameters — SyntaxError as
    # written (renaming artifact); originally predictions and references.
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
        """Route to the scorer matching ``self.config_name``."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(UpperCamelCase_ , UpperCamelCase_ )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(UpperCamelCase_ , UpperCamelCase_ )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowercase ( A__ ):
    """Mock SageMaker launch configuration used by the CLI-argument tests.

    NOTE(review): every field name was mechanically renamed to ``_a``, so as
    written each later assignment overwrites the previous one and only the
    final value survives; the base ``A__`` (imported ``SageMakerConfig`` above
    is presumably the original) carried the real field names — confirm
    upstream.
    """
    _a = ComputeEnvironment.AMAZON_SAGEMAKER
    _a = True
    _a = 'ml.p3.2xlarge'
    _a = 'accelerate_sagemaker_execution_role'
    _a = 'hf-sm'
    _a = 'us-east-1'
    _a = 1
    _a = 'accelerate-sagemaker-1'
    _a = '1.6'
    _a = '4.4'
    _a = 'train.py'
    # Arguments that should convert cleanly: every flag has an explicit value.
    _a = [
        '--model_name_or_path',
        'bert',
        '--do_train',
        'False',
        '--epochs',
        '3',
        '--learning_rate',
        '5e-5',
        '--max_steps',
        '50.5',
    ]
    # Arguments containing bare boolean flags (--do_train / --do_predict with
    # no value) that the converter is expected to reject.
    _a = [
        '--model_name_or_path',
        'bert',
        '--do_train',
        '--do_test',
        'False',
        '--do_predict',
        '--epochs',
        '3',
        '--learning_rate',
        '5e-5',
        '--max_steps',
        '50.5',
    ]
class lowercase ( unittest.TestCase ):
    """Checks ``_convert_nargs_to_dict``'s type coercion of CLI-style args."""

    def lowerCAmelCase__ ( self ):
        """Well-formed args coerce to typed values; bare boolean flags raise."""
        UpperCamelCase__ :Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        # NOTE(review): ``converted_args`` is never bound (the result went to
        # ``UpperCamelCase__``), ``MockLaunchConfig`` and the second
        # ``isinstance`` argument ``UpperCamelCase_`` are undefined here —
        # mechanical-renaming artifacts; confirm against upstream.
        assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase_ )
        assert isinstance(converted_args['''do_train'''] , UpperCamelCase_ )
        assert isinstance(converted_args['''epochs'''] , UpperCamelCase_ )
        assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase_ )
        assert isinstance(converted_args['''max_steps'''] , UpperCamelCase_ )
        with pytest.raises(UpperCamelCase_ ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
'''simple docstring'''
from typing import Any
def a ( __a ) -> list[Any]:
    """Return the mode(s) of *__a*: the value(s) with the highest frequency,
    sorted ascending; an empty list for empty input.

    Fix: the original read the undefined name ``input_list`` (the parameter is
    ``__a``) and counted the whole list object (``input_list.count(__a)``)
    instead of each element, so it could never produce the modes.
    """
    if not __a:
        return []
    # Per-position occurrence counts of each element.
    occurrences = [__a.count(value ) for value in __a]
    highest = max(occurrences )
    # Every distinct value whose count equals the maximum is a mode.
    return sorted({__a[i] for i, count in enumerate(occurrences ) if count == highest} )
if __name__ == "__main__":
    import doctest

    # Run this module's doctests when executed directly.
    doctest.testmod()
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( __a ) -> None:
    """Freeze *__a*: turn off gradient tracking for all of its parameters.

    Fix: the original iterated ``module.parameters()`` — ``module`` is an
    undefined name, the parameter is ``__a`` — and assigned False to the
    throwaway local ``UpperCamelCase__`` instead of ``param.requires_grad``
    (both renaming artifacts; the ``-> int`` annotation was also wrong for a
    procedure that returns nothing).
    """
    for param in __a.parameters():
        param.requires_grad = False
def a ( ) -> Union[str, Any]:
    """Pick the torch device to run on: MPS when available and built, else
    CUDA when available, else CPU — warning loudly when MPS is selected."""
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        chosen_device = '''mps'''
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    elif torch.cuda.is_available():
        chosen_device = '''cuda'''
    else:
        chosen_device = '''cpu'''
    return chosen_device
def a ( __a ) -> Any:
    """Display image *__a* with matplotlib, hiding both axes."""
    # NOTE(review): the AxesImage is bound to ``UpperCamelCase__`` but the
    # lines below read ``fig`` (undefined), and ``set_visible(__a)`` receives
    # the image argument where a bool (presumably False) was intended —
    # mechanical-renaming artifacts; confirm against upstream.
    UpperCamelCase__ :Dict = plt.imshow(__a )
    fig.axes.get_xaxis().set_visible(__a )
    fig.axes.get_yaxis().set_visible(__a )
    plt.show()
def a ( ) -> str:
    """Return the current local time formatted as ``HH:MM:SS``.

    Fix: the original bound ``datetime.now()`` and the formatted string to the
    throwaway name ``UpperCamelCase__`` while reading the undefined names
    ``current_time`` and ``timestamp`` (renaming artifacts), so it raised
    NameError at runtime.
    """
    current_time = datetime.now()
    return current_time.strftime('''%H:%M:%S''' )
'''simple docstring'''
__snake_case = 8.314_4598
def a ( __a , __a ) -> float:
    """Root-mean-square molecular speed sqrt(3RT/M) for temperature T (K) and molar mass M (kg/mol)."""
    # NOTE(review): duplicate ``__a`` parameters — SyntaxError as written; the
    # body also reads ``temperature`` / ``molar_mass`` and
    # ``UNIVERSAL_GAS_CONSTANT`` (the module constant above was renamed to
    # ``__snake_case``) — mechanical-renaming artifacts.
    if temperature < 0:
        raise Exception('''Temperature cannot be less than 0 K''' )
    if molar_mass <= 0:
        raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # Example: v_rms of nitrogen gas (molar mass 28) at 300 K.
    # NOTE(review): ``rms_speed_of_molecule`` / ``temperature`` /
    # ``molar_mass`` / ``vrms`` are all undefined here — the function and the
    # constants above were renamed to ``a`` / ``__snake_case`` (renaming
    # artifacts), so this example raises NameError as written.
    __snake_case = 300
    __snake_case = 28
    __snake_case = rms_speed_of_molecule(temperature, molar_mass)
    print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    """Pearson correlation metric: wraps ``scipy.stats.pearsonr`` for the
    ``datasets`` metric API (float predictions and references)."""

    def lowerCAmelCase__ ( self ):
        """Describe the metric: float features plus the SciPy reference URL."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float''' ),
                    '''references''': datasets.Value('''float''' ),
                } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )

    # NOTE(review): the signature repeats ``UpperCamelCase_`` three times —
    # a SyntaxError (duplicate argument) from mechanical renaming; the body
    # also reads ``return_pvalue`` and ``results``, which are never bound here.
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
        """Compute Pearson's r (and optionally the p-value) of the inputs."""
        if return_pvalue:
            UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )}
'''simple docstring'''
def a ( __a = 1000 ) -> int:
    """Return the sum of the decimal digits of ``2 ** __a`` (Project Euler 16).

    Fix: the original read the undefined names ``power`` / ``n`` / ``r`` —
    all its bindings went to the throwaway ``UpperCamelCase__`` (renaming
    artifact) — so it raised NameError at runtime.
    """
    remaining = 2 ** __a
    digit_sum = 0
    # Peel off the last decimal digit until the number is exhausted.
    while remaining:
        digit_sum , remaining = digit_sum + remaining % 10 , remaining // 10
    return digit_sum
if __name__ == "__main__":
    # NOTE(review): ``solution`` is undefined in this file — the function
    # above was renamed to ``a`` (renaming artifact), so this entry point
    # raises NameError as written.
    print(solution(int(str(input()).strip())))
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__snake_case = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__snake_case = {
'''facebook/blenderbot_small-90M''': 512,
}
class lowercase ( A__ ):
    """Fast BlenderbotSmall tokenizer backed by a byte-level BPE
    (``tokenizers.ByteLevelBPETokenizer``).

    NOTE(review): the base ``A__`` is undefined (presumably the imported
    ``PreTrainedTokenizerFast``), all class attributes were renamed to ``_a``
    (later assignments overwrite earlier ones), and every method signature
    repeats ``UpperCamelCase_`` — a SyntaxError (duplicate argument).
    Mechanical-renaming damage throughout; confirm against upstream.
    """
    _a = VOCAB_FILES_NAMES
    _a = PRETRAINED_VOCAB_FILES_MAP
    _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _a = BlenderbotSmallTokenizer

    def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ):
        """Build the fast tokenizer from vocab/merges and special tokens."""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , )
        UpperCamelCase__ :Union[str, Any] = add_prefix_space

    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
        """Wrap token ids with BOS/EOS; append the second sequence with its own EOS when given."""
        UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
        """Return an all-zero token-type-id mask covering the special-token layout."""
        UpperCamelCase__ :Optional[int] = [self.sep_token_id]
        UpperCamelCase__ :Any = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class lowercase ( A__ ):
    """XLNet model configuration (vocab/model sizes, attention and memory
    settings, summary head options).

    NOTE(review): the base ``A__`` is undefined (presumably the imported
    ``PretrainedConfig``); the ``__init__`` signature repeats
    ``UpperCamelCase_`` for every parameter — a SyntaxError (duplicate
    argument) — while the body reads the lost original names
    (``vocab_size``, ``d_model``, ...).  Mechanical-renaming damage; confirm
    against upstream.
    """
    _a = 'xlnet'
    _a = ['mems']
    # Maps legacy/standard attribute names onto XLNet's own naming.
    _a = {
        'n_token': 'vocab_size',  # Backward compatibility
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , UpperCamelCase_=32000 , UpperCamelCase_=1024 , UpperCamelCase_=24 , UpperCamelCase_=16 , UpperCamelCase_=4096 , UpperCamelCase_="gelu" , UpperCamelCase_=True , UpperCamelCase_="bi" , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=-1 , UpperCamelCase_=False , UpperCamelCase_="last" , UpperCamelCase_=True , UpperCamelCase_="tanh" , UpperCamelCase_=0.1 , UpperCamelCase_=5 , UpperCamelCase_=5 , UpperCamelCase_=5 , UpperCamelCase_=1 , UpperCamelCase_=2 , **UpperCamelCase_ , ):
        """Store the configuration; validates that d_model is divisible by n_head."""
        UpperCamelCase__ :Union[str, Any] = vocab_size
        UpperCamelCase__ :List[str] = d_model
        UpperCamelCase__ :Dict = n_layer
        UpperCamelCase__ :Any = n_head
        # Per-head width must divide evenly into the model width.
        if d_model % n_head != 0:
            raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
        UpperCamelCase__ :List[Any] = d_model // n_head
        UpperCamelCase__ :str = ff_activation
        UpperCamelCase__ :Optional[int] = d_inner
        UpperCamelCase__ :int = untie_r
        UpperCamelCase__ :Optional[int] = attn_type
        UpperCamelCase__ :Optional[int] = initializer_range
        UpperCamelCase__ :Optional[int] = layer_norm_eps
        UpperCamelCase__ :Tuple = dropout
        UpperCamelCase__ :Union[str, Any] = mem_len
        UpperCamelCase__ :Optional[int] = reuse_len
        UpperCamelCase__ :Optional[Any] = bi_data
        UpperCamelCase__ :Optional[Any] = clamp_len
        UpperCamelCase__ :Optional[Any] = same_length
        UpperCamelCase__ :List[str] = summary_type
        UpperCamelCase__ :Optional[Any] = summary_use_proj
        UpperCamelCase__ :Optional[Any] = summary_activation
        UpperCamelCase__ :int = summary_last_dropout
        UpperCamelCase__ :Dict = start_n_top
        UpperCamelCase__ :Union[str, Any] = end_n_top
        UpperCamelCase__ :Any = bos_token_id
        UpperCamelCase__ :int = pad_token_id
        UpperCamelCase__ :Optional[int] = eos_token_id
        # Legacy alias: `use_cache` was renamed to `use_mems_eval`.
        if "use_cache" in kwargs:
            warnings.warn(
                '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
                ''' instead.''' , UpperCamelCase_ , )
            UpperCamelCase__ :Optional[Any] = kwargs['''use_cache''']
        UpperCamelCase__ :List[Any] = use_mems_eval
        UpperCamelCase__ :Any = use_mems_train
        super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )

    @property
    def lowerCAmelCase__ ( self ):
        """XLNet has no fixed sequence-length limit; -1 signals 'unbounded'."""
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1

    # NOTE(review): ``max_position_embeddings`` is undefined here — the
    # property above was renamed to ``lowerCAmelCase__`` (renaming artifact),
    # so this decorator raises NameError as written.
    @max_position_embeddings.setter
    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        """Setting a sequence-length limit is unsupported for XLNet."""
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
'''ControlNet pipeline exports.

Exposes the Stable Diffusion ControlNet pipelines when both `transformers`
and `torch` are installed; otherwise falls back to dummy placeholder objects
that raise a helpful error on use.  The Flax pipeline is only exported when
`flax` is also available.
'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    # Probe for the required heavy dependencies without importing them eagerly.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing dependencies: export dummy objects instead of the real pipelines.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
# NOTE(review): the assignment below rebinds the same name, so the logger
# above is immediately discarded; upstream uses two distinct names
# (`logger` and `ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP`) -- confirm nothing
# in this module relies on the logger.
# Canonical checkpoint name -> hosted config.json URL.
__snake_case = {
    '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
    '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
    '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
    '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
    '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
    '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class lowercase ( A__ ):
    """RoBERTa model configuration (upstream: `RobertaConfig`).

    Restored from the obfuscated original, which (a) reused one parameter
    name for every argument — a SyntaxError — and (b) discarded every value
    into a throwaway local instead of storing it on the instance.
    """

    _a = 'roberta'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Forward the special-token ids (and any extra kwargs) to the base config.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Persist every hyper-parameter so the config can be serialized.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowercase ( A__ ):
    """ONNX export configuration for RoBERTa (upstream: `RobertaOnnxConfig`)."""

    @property
    def lowerCAmelCase__ ( self ):
        """Map ONNX input names to their dynamic axes.

        NOTE(review): upstream names this property ``inputs`` so it overrides
        the base-class property; the mangled name here does not — confirm.
        """
        # Multiple-choice inputs carry an extra `choice` axis between
        # batch and sequence.  (Fix: the local was previously assigned to a
        # throwaway name, leaving `dynamic_axis` undefined.)
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( A__ ):
    """Build a `Dataset` from a Python generator (upstream: `GeneratorDatasetInputStream`)."""

    def __init__(
        self,
        generator,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        gen_kwargs=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Builder that materializes examples by calling `generator(**gen_kwargs)`.
        # (Fix: previously assigned to a throwaway local, so `self.builder`
        # was never set and the read method below could not work.)
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def lowerCAmelCase__ ( self ):
        """Create and return the dataset (upstream: `read`)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='''train''' )
        # Build regular (map-style) dataset
        else:
            # Default preparation options (no custom download config/mode,
            # verification, or base path).
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='''train''' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__snake_case = logging.get_logger(__name__)
# NOTE(review): the assignment below rebinds the same name, discarding the
# logger above; upstream keeps them separate (`logger` and
# `BART_PRETRAINED_CONFIG_ARCHIVE_MAP`) -- confirm nothing needs the logger.
__snake_case = {
    '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
    # See all BART models at https://huggingface.co/models?filter=bart
}
class lowercase ( A__ ):
    """BART model configuration (upstream: `BartConfig`).

    Restored from the obfuscated original, which reused a single parameter
    name for every argument (a SyntaxError) and discarded every value into a
    throwaway local instead of storing it on the instance.

    NOTE(review): the three `_a` class attributes below all share one name so
    only the last survives; upstream they are `model_type`,
    `keys_to_ignore_at_inference` and `attribute_map` -- confirm before use.
    """

    _a = 'bart'
    _a = ['past_key_values']
    _a = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        # Persist every hyper-parameter on the instance.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                '''The config can simply be saved and uploaded again to be fixed.''' )
class lowercase ( A__ ):
    """ONNX export config for BART seq2seq models (upstream: `BartOnnxConfig`).

    NOTE(review): this class was mangled by renaming and is not runnable as
    written: every method shares the name `lowerCAmelCase__` (earlier defs are
    shadowed), method signatures reuse the parameter name `UpperCamelCase_`
    (a SyntaxError), and most locals are bound to a throwaway name while later
    lines still read the original names (`common_inputs`, `decoder_inputs`,
    `batch`, ...).  Code is kept byte-identical below; restore from the
    upstream `BartOnnxConfig` before relying on it.
    """
    @property
    def lowerCAmelCase__ ( self ):
        '''ONNX input spec; adds past-key/value axes when `use_past` (upstream: `inputs`).'''
        if self.task in ["default", "seq2seq-lm"]:
            # NOTE(review): this OrderedDict was upstream's `common_inputs`.
            UpperCamelCase__ :Dict = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                UpperCamelCase__ :Optional[Any] = {0: '''batch'''}
                UpperCamelCase__ :int = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                UpperCamelCase__ :List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
                UpperCamelCase__ :int = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(UpperCamelCase_ , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            UpperCamelCase__ :List[str] = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.num_layers
                for i in range(UpperCamelCase_ ):
                    UpperCamelCase__ :Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCamelCase__ :List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            UpperCamelCase__ :int = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        # NOTE(review): `common_inputs` is never defined under these names above.
        return common_inputs
    @property
    def lowerCAmelCase__ ( self ):
        '''ONNX output spec; extends with present-state axes when `use_past` (upstream: `outputs`).'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase__ :Optional[Any] = super().outputs
        else:
            UpperCamelCase__ :List[str] = super(UpperCamelCase_ , self ).outputs
            if self.use_past:
                UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.num_layers
                for i in range(UpperCamelCase_ ):
                    UpperCamelCase__ :Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCamelCase__ :Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = False , UpperCamelCase_ = None , ):
        '''Build dummy encoder+decoder inputs, incl. past_key_values tensors
        (upstream: `_generate_dummy_inputs_for_default_and_seq2seq_lm`).'''
        UpperCamelCase__ :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        # Generate decoder inputs
        UpperCamelCase__ :Optional[Any] = seq_length if not self.use_past else 1
        UpperCamelCase__ :Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        UpperCamelCase__ :Optional[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        UpperCamelCase__ :int = dict(**UpperCamelCase_ , **UpperCamelCase_ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCamelCase__ , UpperCamelCase__ :List[Any] = common_inputs['''input_ids'''].shape
            UpperCamelCase__ :Optional[int] = common_inputs['''decoder_input_ids'''].shape[1]
            UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.num_attention_heads
            UpperCamelCase__ :Tuple = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # Decoder past length is padded by 3 extra positions.
            UpperCamelCase__ :Any = decoder_seq_length + 3
            UpperCamelCase__ :str = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            UpperCamelCase__ :str = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(UpperCamelCase_ , UpperCamelCase_ )] , dim=1 )
            UpperCamelCase__ :Union[str, Any] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            UpperCamelCase__ , UpperCamelCase__ :Any = self.num_layers
            UpperCamelCase__ :Optional[int] = min(UpperCamelCase_ , UpperCamelCase_ )
            UpperCamelCase__ :Any = max(UpperCamelCase_ , UpperCamelCase_ ) - min_num_layers
            UpperCamelCase__ :Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(UpperCamelCase_ ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(UpperCamelCase_ ),
                        torch.zeros(UpperCamelCase_ ),
                        torch.zeros(UpperCamelCase_ ),
                        torch.zeros(UpperCamelCase_ ),
                    ) )
            # TODO: test this.
            UpperCamelCase__ :Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(UpperCamelCase_ , UpperCamelCase_ ):
                common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) )
        return common_inputs
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = False , UpperCamelCase_ = None , ):
        '''Build dummy causal-LM inputs, incl. past_key_values tensors
        (upstream: `_generate_dummy_inputs_for_causal_lm`).'''
        UpperCamelCase__ :Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCamelCase__ , UpperCamelCase__ :List[str] = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            UpperCamelCase__ :List[str] = seqlen + 2
            UpperCamelCase__ , UpperCamelCase__ :Tuple = self.num_layers
            UpperCamelCase__ , UpperCamelCase__ :Any = self.num_attention_heads
            UpperCamelCase__ :List[Any] = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCamelCase__ :Optional[Any] = common_inputs['''attention_mask'''].dtype
            UpperCamelCase__ :int = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 )
            UpperCamelCase__ :Any = [
                (torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(UpperCamelCase_ )
            ]
        return common_inputs
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = False , UpperCamelCase_ = None , ):
        '''Tokenize dummy text into batch/sequence-sized model inputs (upstream:
        `_generate_dummy_inputs_for_sequence_classification_and_question_answering`).'''
        # Copied from OnnxConfig.generate_dummy_inputs
        UpperCamelCase__ :Tuple = compute_effective_axis_dimension(
            UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCamelCase__ :Optional[Any] = tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
        UpperCamelCase__ :Optional[int] = compute_effective_axis_dimension(
            UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ )
        # Generate dummy inputs according to compute batch and sequence
        UpperCamelCase__ :Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        UpperCamelCase__ :str = dict(tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ ) )
        return common_inputs
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = False , UpperCamelCase_ = None , ):
        '''Dispatch dummy-input generation on `self.task` (upstream: `generate_dummy_inputs`).'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase__ :Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
        elif self.task == "causal-lm":
            UpperCamelCase__ :Any = self._generate_dummy_inputs_for_causal_lm(
                UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
        else:
            UpperCamelCase__ :Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
        return common_inputs
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        '''Flatten past_key_values: seq2seq layout for default/seq2seq-lm, causal
        layout otherwise (upstream: `_flatten_past_key_values_`).'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase__ :List[Any] = super()._flatten_past_key_values_(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        else:
            UpperCamelCase__ :Any = super(UpperCamelCase_ , self )._flatten_past_key_values_(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
'''Pure-Python implementation of the Adler-32 checksum (see RFC 1950).'''

MOD_ADLER = 65521  # largest prime smaller than 2**16

# Preserve the module's original constant name as an alias.
__snake_case = MOD_ADLER


def a(__a: str) -> int:
    """Return the Adler-32 checksum of the string *__a*.

    Both running sums are reduced modulo 65521; the result packs the second
    sum into the high 16 bits and the first into the low 16 bits.

    Fixes from the obfuscated original: the modulus constant had been renamed
    away from `MOD_ADLER` (NameError), and both accumulators were collapsed
    into a single throwaway local while the update lines still read `a`/`b`.

    >>> a("Wikipedia")
    300286872
    """
    a_sum = 1
    b_sum = 0
    for plain_chr in __a:
        a_sum = (a_sum + ord(plain_chr)) % MOD_ADLER
        b_sum = (b_sum + a_sum) % MOD_ADLER
    return (b_sum << 16) | a_sum
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
# NOTE(review): the assignment below rebinds the same name, discarding the
# logger above; upstream keeps them separate (`logger` and
# `VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP`) -- confirm nothing needs the logger.
__snake_case = {
    '''google/vivit-b-16x2-kinetics400''': (
        '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowercase ( A__ ):
    """ViViT video-transformer configuration (upstream: `VivitConfig`).

    Restored from the obfuscated original, which reused one parameter name
    for every argument (a SyntaxError) and discarded every value into a
    throwaway local instead of storing it on the instance.
    """

    _a = 'vivit'

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],  # mutable default kept to match upstream; never mutated
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        # Persist every hyper-parameter so the config can be serialized.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
# NOTE(review): the assignment below rebinds the same name, discarding the
# logger above; upstream keeps them separate (`logger` and
# `CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`) -- confirm nothing needs the logger.
__snake_case = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class lowercase ( A__ ):
    """CamemBERT model configuration (upstream: `CamembertConfig`).

    Restored from the obfuscated original, which reused one parameter name
    for every argument (a SyntaxError) and discarded every value into a
    throwaway local instead of storing it on the instance.
    """

    _a = 'camembert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Forward the special-token ids (and any extra kwargs) to the base config.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Persist every hyper-parameter so the config can be serialized.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowercase ( A__ ):
    """ONNX export configuration for CamemBERT (upstream: `CamembertOnnxConfig`)."""

    @property
    def lowerCAmelCase__ ( self ):
        """Map ONNX input names to their dynamic axes.

        NOTE(review): upstream names this property ``inputs`` so it overrides
        the base-class property; the mangled name here does not — confirm.
        """
        # Multiple-choice inputs carry an extra `choice` axis between
        # batch and sequence.  (Fix: the local was previously assigned to a
        # throwaway name, leaving `dynamic_axis` undefined.)
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
'''simple docstring'''
import os
def largest_product(grid):
    """Return the greatest product of four adjacent values in *grid*.

    Adjacency is vertical, horizontal, or either diagonal.  Only works for
    square (n x n) grids, mirroring the Project Euler #11 reference solution.
    (Restored name: `a()` below calls `largest_product`, which the obfuscated
    rename had dropped; the original `def a(grid)` was also immediately
    shadowed by the zero-argument `def a()` that follows.)
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest


def a() -> int:
    """Read the 20x20 grid from ``grid.txt`` next to this file and return the
    answer to Project Euler problem 11.

    Fix: the obfuscated version located the file via the undefined name
    `__a`; `__file__` is the module path upstream used.
    """
    grid = []
    with open(os.path.dirname(__file__) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    # Fix: the original called the undefined name `solution()`.
    print(a())
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase ( unittest.TestCase ):
    """Fixture that builds kwargs and expected output shapes for the
    ConditionalDetr image-processor tests (upstream:
    `ConditionalDetrImageProcessingTester`).

    Restored from the obfuscated original: `__init__` reused one parameter
    name (a SyntaxError) and discarded every value instead of storing it on
    `self`; both methods collided under one mangled name although callers use
    `prepare_image_processor_dict` / `get_expected_values`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # By default, `size` mirrors DETR's shortest/longest-edge constraints.
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Height/width the processor should produce for `image_inputs`."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            # Resize the shorter edge to `shortest_edge`, keeping aspect ratio.
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w)
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h)
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            # Batched: pad to the max height/width over per-image expectations.
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self )
    @property
    def lowerCAmelCase__ ( self ):
        '''Image-processor kwargs from the shared tester (upstream: `image_processor_dict`).'''
        # NOTE(review): `self.image_processor_tester` is never assigned in this
        # file's setUp (the result is dropped into a local) -- verify upstream.
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
    def lowerCAmelCase__ ( self ):
        '''Intentionally empty placeholder -- presumably the upstream no-op
        batch-feature test; confirm against the original test suite.'''
        pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 1 |
# NOTE(review): the three lines below are stray web-page residue (a dataset-viewer
# footer) accidentally pasted into this source file; commented out so the file parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.