| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""
Project Euler problem 174: https://projecteuler.net/problem=174

Count the tile totals t <= 1,000,000 that form between 1 and 10 distinct
hollow square laminae.
"""
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
| 296 |
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
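# A minimal sketch (not part of the library source; the class name below is
# illustrative) of how a concrete reader plugs into AbstractDatasetReader:
# store any extra state in __init__, then materialize the dataset in read().
class _ExampleDatasetReader(AbstractDatasetReader):
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        # a real implementation would build a dataset from self.path_or_paths,
        # honoring self.streaming, self.cache_dir and self.keep_in_memory
        raise NotImplementedError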
| 296 | 1 |
"""simple docstring"""
def lowercase__(A = 200 ) ->int:
"""simple docstring"""
lowercase__ : Union[str, Any]= [1, 2, 5, 10, 20, 50, 100, 200]
lowercase__ : int= [0] * (pence + 1)
lowercase__ : Optional[int]= 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(A , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
| 707 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
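# Illustrative usage once both optional dependencies are installed (the Hub
# checkpoint name is assumed, not stated in this file):
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")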
| 85 | 0 |
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental Sieve of Eratosthenes that lazily yields primes forever."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # prime is composite: slide its smallest factor to the next multiple
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Project Euler problem 123: first n where the remainder exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
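# Quick sanity check for the incremental sieve (illustrative, not part of the
# original module):
#     >>> from itertools import islice
#     >>> list(islice(sieve(), 5))
#     [2, 3, 5, 7, 11]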
| 397 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> tuple[int, int]:
'''simple docstring'''
if b == 0:
return (1, 0)
((a__) , (a__)) : List[Any] = extended_euclid(lowerCAmelCase__ , a % b )
a__ : str = a // b
return (y, x - k * y)
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
((a__) , (a__)) : Tuple = extended_euclid(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : List[str] = na * na
a__ : Union[str, Any] = ra * x * na + ra * y * na
return (n % m + m) % m
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
((a__) , (a__)) : Optional[Any] = extended_euclid(lowerCAmelCase__ , lowerCAmelCase__ )
if b < 0:
a__ : Optional[int] = (b % n + n) % n
return b
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
a__ , a__ : List[Any] = invert_modulo(lowerCAmelCase__ , lowerCAmelCase__ ), invert_modulo(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Dict = na * na
a__ : Any = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
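# Illustrative check (assumed example, not from the original module): solving
# n = 1 (mod 5) and n = 3 (mod 7) gives the unique residue 31 modulo 35.
#     >>> chinese_remainder_theorem(5, 1, 7, 3)
#     31
#     >>> chinese_remainder_theorem2(5, 1, 7, 3)
#     31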
| 642 | 0 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Dict = CodeGenTokenizer
A_ : Optional[Any] = CodeGenTokenizerFast
A_ : List[str] = True
A_ : Dict = {'add_prefix_space': True}
A_ : Union[str, Any] = False
def __lowerCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase : List[str] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
__lowerCAmelCase : Any = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase : Optional[Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__lowerCAmelCase : int = {'unk_token': '<unk>'}
__lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_SCREAMING_SNAKE_CASE ) )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = 'lower newer'
__lowerCAmelCase : Optional[int] = 'lower newer'
return input_text, output_text
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCAmelCase : Optional[Any] = 'lower newer'
__lowerCAmelCase : Optional[Any] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
__lowerCAmelCase : int = tokenizer.tokenize(_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = tokens + [tokenizer.unk_token]
__lowerCAmelCase : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : str = self.get_tokenizer()
__lowerCAmelCase : Dict = self.get_rust_tokenizer(add_prefix_space=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = 'lower newer'
# Testing tokenization
__lowerCAmelCase : str = tokenizer.tokenize(_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
__lowerCAmelCase : Dict = self.get_rust_tokenizer(add_prefix_space=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Testing the unknown token
__lowerCAmelCase : Tuple = tokens + [rust_tokenizer.unk_token]
__lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowerCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# Simple input
__lowerCAmelCase : Union[str, Any] = 'This is a simple input'
__lowerCAmelCase : Dict = ['This is a simple input 1', 'This is a simple input 2']
__lowerCAmelCase : str = ('This is a simple input', 'This is a pair')
__lowerCAmelCase : str = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Simple input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Simple input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' , )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Pair input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__lowerCAmelCase : List[str] = 'This is a simple input'
__lowerCAmelCase : Dict = ['This is a simple input looooooooong', 'This is a simple input']
__lowerCAmelCase : str = ('This is a simple input', 'This is a pair')
__lowerCAmelCase : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__lowerCAmelCase : Dict = tokenizer.pad_token_id
__lowerCAmelCase : Dict = tokenizer(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=30 , return_tensors='np' )
__lowerCAmelCase : Any = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncate=_SCREAMING_SNAKE_CASE , return_tensors='np' )
__lowerCAmelCase : int = tokenizer(*_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=60 , return_tensors='np' )
__lowerCAmelCase : Tuple = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncate=_SCREAMING_SNAKE_CASE , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = '$$$'
__lowerCAmelCase : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_SCREAMING_SNAKE_CASE , add_bos_token=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = 'This is a simple input'
__lowerCAmelCase : Dict = ['This is a simple input 1', 'This is a simple input 2']
__lowerCAmelCase : Any = tokenizer.bos_token_id
__lowerCAmelCase : Union[str, Any] = tokenizer(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = tokenizer(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , _SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__lowerCAmelCase : Optional[int] = tokenizer.decode(out_s.input_ids )
__lowerCAmelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
__lowerCAmelCase : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
__lowerCAmelCase : int = '\nif len_a > len_b: result = a\nelse: result = b'
__lowerCAmelCase : int = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
__lowerCAmelCase : List[Any] = tokenizer.decode(_SCREAMING_SNAKE_CASE , truncate_before_pattern=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
pass
| 549 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
if n_term == "":
return []
__lowerCAmelCase : list = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F"1/{temp + 1}" if series else '1' )
return series
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 549 | 1 |
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: maximize value of n items under capacity w."""
    # sort items by value-to-weight ratio, best first
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
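# Illustrative call (assumed inputs, not from the original module): with values
# [60, 100, 120], weights [10, 20, 30] and capacity 50, the greedy choice takes
# the two best-ratio items whole plus 2/3 of the last one:
#     >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
#     240.0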
| 233 |
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthews Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:
    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    # group predictions by question, then compute per-question macro-F1 and EM
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 233 | 1 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national ID: 8 digits plus a checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
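# Illustrative checks (assumed inputs): the checksum letter is
# LOOKUP_LETTERS[number % 23], so "12345678Z" validates (12345678 % 23 == 14, and
# LOOKUP_LETTERS[14] == "Z"):
#     >>> is_spain_national_id("12345678Z")
#     True
#     >>> is_spain_national_id("12345678T")
#     False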
| 701 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
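# Illustrative effect of the lazy module (assumed usage, not part of this file):
# the heavy torch-backed submodules above are only imported on first attribute
# access, e.g.
#     from transformers import ClapModel, ClapProcessor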
| 429 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
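# Illustrative sketch (assumed inputs, not from the library source): aligning
# the template with a dataset's features swaps the generic ClassLabel for the
# dataset's own label feature.
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification(image_column="image", label_column="labels")
#     task = task.align_with_features(features)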
| 14 |
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 658 | 0 |
from collections import deque


def tarjan(g):
    """Tarjan's algorithm: strongly connected components of a directed graph."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 328 |
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
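# Example invocation (script filename and output path are assumed for
# illustration; the checkpoint URL is the parser default above):
#     python convert_vit_msn_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#         --pytorch_dump_folder_path ./vit-msn-small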
| 328 | 1 |
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes via BFS."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path, or -1 if unreachable."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
| 313 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 313 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
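# In practice this module backs the `accelerate env` CLI subcommand; with the
# parser above, an explicit config can also be passed (path assumed):
#     $ accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml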
| 561 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Optional[Any] =DiTPipeline
UpperCamelCase__ : Optional[int] =CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
UpperCamelCase__ : int =PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
UpperCamelCase__ : List[Any] =CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
UpperCamelCase__ : Dict =False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
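# Minimal usage sketch of the pipeline exercised above (hedged: requires a CUDA
# device and network access to download "facebook/DiT-XL-2-256"; the label is just
# an example ImageNet class):
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#     class_ids = pipe.get_label_ids(["white shark"])
#     image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
#     image.save("dit_sample.png")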
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)


@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, actions and returns. Test is performed over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
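# Shape sketch for a single forward pass (illustrative only; `model` and the tensors
# follow the integration test above):
#
#     with torch.no_grad():
#         outputs = model(states=states, actions=actions, rewards=rewards,
#                         returns_to_go=returns_to_go, timesteps=timesteps,
#                         attention_mask=attention_mask)
#     # outputs.state_preds:  (1, seq_len, state_dim)
#     # outputs.action_preds: (1, seq_len, act_dim)
#     # outputs.return_preds: (1, seq_len, 1)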
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """
    Launches a training function, using several processes if it's possible in the current environment
    (TPU with multiple cores for instance).
    """
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
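# Usage sketch for `notebook_launcher` (hedged: assumes a `training_loop` function
# defined in the notebook that builds its own `Accelerator`; the args tuple is
# forwarded to it unchanged):
#
#     def training_loop(mixed_precision="fp16"):
#         ...  # build dataloaders/model/optimizer, then accelerator.prepare(...)
#
#     notebook_launcher(training_loop, args=("fp16",), num_processes=2)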
def debug_launcher(function, args=(), num_processes=2):
    """
    Launches a training function using several processes on CPU for debugging purposes.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal value to its binary equivalent.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
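# Quick sanity checks for the conversion above (doctest-style; 0xAC == 0b10101100):
#
#     >>> hex_to_bin("AC")
#     10101100
#     >>> hex_to_bin("-9A")
#     -10011010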
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
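# The three DiffEdit stages exercised above, as a compact sketch (illustrative;
# model id and prompts are copied from the tests, fp16 + CUDA assumed, `img` is a
# 768x768 PIL image):
#
#     pipe = StableDiffusionDiffEditPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
#     ).to("cuda")
#     mask = pipe.generate_mask(image=img, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
#     latents = pipe.invert(prompt="a bowl of fruit", image=img, inpaint_strength=0.7).latents
#     result = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7).images[0]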
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
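# Usage sketch mirroring the integration test above (network access assumed):
#
#     processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#     encoding = processor(images, return_tensors="pt")
#     encoding.input_ids.shape  # (batch_size, 1024): each pixel mapped to its nearest color cluster id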
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """
    ResNet embeddings (stem), composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size and, if needed,
    to downsample the input with `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer, composed of two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer. The first `1x1` convolution reduces the input by a factor of
    `reduction` to make the middle `3x3` convolution cheaper; the last `1x1` convolution remaps the
    reduced features to `out_channels`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and
    loading pretrained models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
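# End-to-end usage sketch for the classification head above (hedged: weights and
# processor are downloaded from the Hub; `image` is any PIL image):
#
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])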
"""
Build a quantum full adder circuit with Qiskit and simulate it on the Aer backend.
"""
import math

import qiskit


def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """
    Adds two single-bit inputs and a carry-in on a 4-qubit circuit and returns the
    measurement counts for the two output bits. An input value of 2 places that
    qubit in superposition via a Hadamard gate.
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    quantum_register = qiskit.QuantumRegister(4, "qr")
    classical_register = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(quantum_register, classical_register)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], classical_register)  # measure the last two qubits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
""" Informer model configuration"""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
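# Instantiation sketch (a randomly initialized model from the config above;
# prediction_length is the only strictly task-specific field here):
#
#     from transformers import InformerConfig, InformerModel
#
#     config = InformerConfig(prediction_length=12)
#     model = InformerModel(config)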
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
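# Forward-shape sketch (sizes are illustrative): for pixel_values of shape
# (batch, 3, H, W), CLIP pooling yields (batch, hidden_size); the mapper keeps the
# inserted length-1 sequence axis, so the encoder returns (batch, 1, proj_size), and
# uncond_vector is a learned (1, 1, proj_size) embedding used when an unconditioned
# embedding is needed (e.g. for classifier-free guidance).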
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowercase : Tuple = logging.get_logger(__name__)
__lowercase : List[str] = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Union[str, Any] = "mctct"
def __init__( self , UpperCamelCase__=8_065 , UpperCamelCase__=1_536 , UpperCamelCase__=36 , UpperCamelCase__=6_144 , UpperCamelCase__=4 , UpperCamelCase__=384 , UpperCamelCase__=920 , UpperCamelCase__=1e-5 , UpperCamelCase__=0.3 , UpperCamelCase__="relu" , UpperCamelCase__=0.02 , UpperCamelCase__=0.3 , UpperCamelCase__=0.3 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=0.3 , UpperCamelCase__=1 , UpperCamelCase__=(7,) , UpperCamelCase__=(3,) , UpperCamelCase__=80 , UpperCamelCase__=1 , UpperCamelCase__=None , UpperCamelCase__="sum" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = attention_head_dim
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = layerdrop
lowerCamelCase_ = hidden_act
lowerCamelCase_ = initializer_range
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = bos_token_id
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = conv_glu_dim
lowerCamelCase_ = conv_dropout
lowerCamelCase_ = num_conv_layers
lowerCamelCase_ = input_feat_per_channel
lowerCamelCase_ = input_channels
lowerCamelCase_ = conv_channels
lowerCamelCase_ = ctc_loss_reduction
lowerCamelCase_ = ctc_zero_infinity
# prevents config testing fail with exporting to json
lowerCamelCase_ = list(UpperCamelCase__ )
lowerCamelCase_ = list(UpperCamelCase__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
a__ = """bart"""
a__ = True
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE__ )
def lowercase ( ) -> Optional[int]:
if LOAD_DENSE_INDEX:
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
_snake_case : Optional[Any] = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
_snake_case : Dict = qar_model.eval()
else:
_snake_case , _snake_case : str = (None, None)
if MODEL_TYPE == "bart":
_snake_case : Tuple = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
_snake_case : int = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
_snake_case : Optional[Any] = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
_snake_case : int = sas_model.eval()
else:
_snake_case , _snake_case : Tuple = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE__ )
def lowercase ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
_snake_case : List[str] = faiss.StandardGpuResources()
_snake_case : Any = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
_snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
_snake_case : List[str] = faiss.IndexFlatIP(128 )
_snake_case : Dict = faiss.index_cpu_to_gpu(SCREAMING_SNAKE_CASE__ , 1 , SCREAMING_SNAKE_CASE__ )
wikiaab_gpu_index_flat.add(SCREAMING_SNAKE_CASE__ ) # TODO fix for larger GPU
else:
_snake_case , _snake_case : str = (None, None)
_snake_case : List[Any] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE__ )
def lowercase ( ) -> List[Any]:
_snake_case : Any = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
_snake_case : str = elia["""train_eli5"""]
_snake_case : Tuple = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
_snake_case : Union[str, Any] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(SCREAMING_SNAKE_CASE__ )
return (elia_train, eli5_train_q_index)
a__, a__, a__ = load_indexes()
a__, a__, a__, a__ = load_models()
a__, a__ = load_train_data()
def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple=10 ) -> Optional[int]:
_snake_case : str = embed_questions_for_retrieval([question] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case , _snake_case : Optional[int] = eli5_train_q_index.search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : List[str] = [elia_train[int(SCREAMING_SNAKE_CASE__ )] for i in I[0]]
return nn_examples
def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple="wiki40b" , SCREAMING_SNAKE_CASE__ : List[Any]="dense" , SCREAMING_SNAKE_CASE__ : Optional[Any]=10 ) -> Any:
if source == "none":
_snake_case , _snake_case : List[Any] = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_snake_case , _snake_case : Any = query_qa_dense_index(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
_snake_case , _snake_case : Union[str, Any] = query_es_index(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index_name="""english_wiki40b_snippets_100w""" , n_results=SCREAMING_SNAKE_CASE__ , )
_snake_case : Any = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
_snake_case : List[str] = """question: {} context: {}""".format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return question_doc, support_list
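# Illustrative sketch (not part of the original demo): dense retrieval reduces to a
# max-inner-product search over passage embeddings. The hypothetical helper below
# shows the brute-force version of what faiss.IndexFlatIP computes at scale.
def _mips_topk_sketch(question_emb, passage_embs, k=10):
    # score every passage against the question with a plain inner product
    scores = passage_embs @ question_emb
    # keep the indices and scores of the k best-matching passages
    top = np.argsort(-scores)[:k]
    return top, scores[top]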
@st.cache(
hash_funcs={
torch.Tensor: (lambda SCREAMING_SNAKE_CASE__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda SCREAMING_SNAKE_CASE__ : None),
} )
def lowercase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=64 , SCREAMING_SNAKE_CASE__ : List[str]=256 , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Dict=0.9_5 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.8 ) -> str:
with torch.no_grad():
_snake_case : List[str] = qa_sas_generate(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_answers=1 , num_beams=SCREAMING_SNAKE_CASE__ , min_len=SCREAMING_SNAKE_CASE__ , max_len=SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ , temp=SCREAMING_SNAKE_CASE__ , top_p=SCREAMING_SNAKE_CASE__ , top_k=SCREAMING_SNAKE_CASE__ , max_input_length=1_024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
a__ = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
a__ = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
a__ = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
Given the question, a document retriever first fetches a set of relevant Wikipedia passages from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
a__ = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
a__ = st.sidebar.checkbox("""Demo options""")
if demo_options:
a__ = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
a__ = action_list.index(action_st)
a__ = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
a__ = show_type == """Show full text of passages"""
else:
a__ = 3
a__ = True
a__ = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
a__ = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained on the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
a__ = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
a__ = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
a__ = """wiki40b"""
a__ = """dense"""
a__ = """beam"""
a__ = 2
a__ = 64
a__ = 2_56
a__ = None
a__ = None
a__ = st.sidebar.checkbox("""Generation options""")
if generate_options:
a__ = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode deterministically with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
a__ = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
a__ = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
a__ = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
a__ = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
a__ = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
a__ = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
a__ = None
# start main text
a__ = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
a__ = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
a__ = st.text_input("""Enter your question here:""", """""")
else:
a__ = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
a__, a__ = make_support(question, source=wiki_source, method="""dense""", n_results=10)
a__, a__ = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
a__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
a__ = support_list[:10]
a__ = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
a__, a__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
a__, a__ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
a__ = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
a__ = res[1].strip()
if sec_titles == "":
a__ = """[{}]({})""".format(res[0], wiki_url)
else:
a__ = sec_titles.split(""" & """)
a__ = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
a__ = find_nearest_training(question)
a__ = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
a__ = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
a__ = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
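# Illustrative sketch (not part of the original demo): the "sampled" mode above passes
# top_p into qa_sas_generate; a common way to apply it is nucleus (top-p) filtering,
# shown here as a hypothetical helper for a 1-D logits tensor: keep the smallest set
# of tokens whose cumulative probability exceeds top_p and mask out the rest.
def _nucleus_filter_sketch(logits, top_p=0.95):
    # sort token logits from most to least likely
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
    # drop tokens once the cumulative mass exceeds top_p, always keeping the top token
    to_remove = cumulative_probs > top_p
    to_remove[1:] = to_remove[:-1].clone()
    to_remove[0] = False
    filtered = logits.clone()
    filtered[sorted_indices[to_remove]] = float("-inf")
    return filtered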
| 198
|
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowercase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]:
# vision encoder
if "img_encoder.pos_embed" in name:
_snake_case : Any = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
_snake_case : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
_snake_case : Tuple = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
_snake_case : Optional[Any] = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
_snake_case : Union[str, Any] = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
_snake_case : Any = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
_snake_case : int = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
_snake_case : Union[str, Any] = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
_snake_case : Any = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
_snake_case : Dict = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
_snake_case : Union[str, Any] = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
_snake_case : Dict = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
_snake_case : str = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
_snake_case : int = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
_snake_case : Union[str, Any] = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
_snake_case : Optional[Any] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
_snake_case : Union[str, Any] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
_snake_case : Tuple = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
_snake_case : Any = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
_snake_case : str = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
_snake_case : List[Any] = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
_snake_case : Optional[int] = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
_snake_case : List[Any] = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
_snake_case : Tuple = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
for key in orig_state_dict.copy().keys():
_snake_case : Tuple = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
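            # e.g. with a vision hidden size of 384, a fused (3*384, 384) qkv weight
            # splits into query = val[:384], key = val[384:768], value = val[-384:]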
_snake_case : Any = key.split(""".""" )
_snake_case , _snake_case : List[Any] = int(key_split[2] ), int(key_split[4] )
_snake_case : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_snake_case : List[Any] = val[:dim, :]
_snake_case : Union[str, Any] = val[dim : dim * 2, :]
_snake_case : int = val[-dim:, :]
else:
_snake_case : Union[str, Any] = val[:dim]
_snake_case : str = val[dim : dim * 2]
_snake_case : Dict = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_snake_case : int = key.split(""".""" )
_snake_case : Optional[int] = int(key_split[3] )
_snake_case : Union[str, Any] = config.text_config.hidden_size
if "weight" in key:
_snake_case : int = val[:dim, :]
_snake_case : Tuple = val[
dim : dim * 2, :
]
_snake_case : int = val[-dim:, :]
else:
_snake_case : Tuple = val[:dim]
_snake_case : Tuple = val[dim : dim * 2]
_snake_case : str = val[-dim:]
else:
_snake_case : Tuple = rename_key(SCREAMING_SNAKE_CASE__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_snake_case : Any = val.squeeze_()
else:
_snake_case : Optional[int] = val
return orig_state_dict
def lowercase ( ) -> List[str]:
_snake_case : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Dict = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def lowercase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int="groupvit-gcc-yfcc" , SCREAMING_SNAKE_CASE__ : Optional[int]=False ) -> Optional[Any]:
_snake_case : Any = GroupViTConfig()
_snake_case : List[Any] = GroupViTModel(SCREAMING_SNAKE_CASE__ ).eval()
_snake_case : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""model"""]
_snake_case : Tuple = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case , _snake_case : int = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(SCREAMING_SNAKE_CASE__ ) == 0)
# verify result
_snake_case : List[Any] = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
_snake_case : List[Any] = prepare_img()
_snake_case : int = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
with torch.no_grad():
_snake_case : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
if model_name == "groupvit-gcc-yfcc":
_snake_case : Union[str, Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_snake_case : Union[str, Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print("""Successfully saved processor and model to""" , SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(SCREAMING_SNAKE_CASE__ , organization="""nielsr""" )
model.push_to_hub(SCREAMING_SNAKE_CASE__ , organization="""nielsr""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
a__ = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 198
| 1
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 252
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__snake_case : Dict = logging.get_logger(__name__)
@add_end_docstrings(a )
class A ( a ):
def __init__( self , *snake_case_ , **snake_case_ ) -> int:
super().__init__(*snake_case_ , **snake_case_ )
requires_backends(self , "decord" )
self.check_model_type(snake_case_ )
def __lowerCAmelCase ( self , snake_case_=None , snake_case_=None , snake_case_=None ) -> Optional[Any]:
_a = {}
if frame_sampling_rate is not None:
_a = frame_sampling_rate
if num_frames is not None:
_a = num_frames
_a = {}
if top_k is not None:
_a = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , snake_case_ , **snake_case_ ) -> int:
return super().__call__(snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None , snake_case_=1 ) -> List[str]:
if num_frames is None:
_a = self.model.config.num_frames
if video.startswith("http://" ) or video.startswith("https://" ):
_a = BytesIO(requests.get(snake_case_ ).content )
_a = VideoReader(snake_case_ )
videoreader.seek(0 )
_a = 0
_a = num_frames * frame_sampling_rate - 1
_a = np.linspace(snake_case_ , snake_case_ , num=snake_case_ , dtype=np.intaa )
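        # e.g. with num_frames=8 and frame_sampling_rate=4, this picks 8 indices
        # spread evenly over frames 0..31 of the clip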
_a = videoreader.get_batch(snake_case_ ).asnumpy()
_a = list(snake_case_ )
_a = self.image_processor(snake_case_ , return_tensors=self.framework )
return model_inputs
def __lowerCAmelCase ( self , snake_case_ ) -> Dict:
_a = self.model(**snake_case_ )
return model_outputs
def __lowerCAmelCase ( self , snake_case_ , snake_case_=5 ) -> Optional[Any]:
if top_k > self.model.config.num_labels:
_a = self.model.config.num_labels
if self.framework == "pt":
_a = model_outputs.logits.softmax(-1 )[0]
_a , _a = probs.topk(snake_case_ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_a = scores.tolist()
_a = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(snake_case_ , snake_case_ )]
| 131
| 0
|
'''simple docstring'''
from __future__ import annotations
def __snake_case ( UpperCAmelCase_ : list[list[int]] ):
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(UpperCAmelCase_ ) ):
matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for the current position
for i in range(1 , len(UpperCAmelCase_ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
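# Worked example: matrix = [[1, 3, 1], [1, 5, 1], [4, 2, 1]] -> the cheapest
# top-left to bottom-right path (moving only right or down) is 1 -> 3 -> 1 -> 1 -> 1,
# so the function returns 7.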
if __name__ == "__main__":
import doctest
doctest.testmod()
| 445
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ : List[Any] = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ["""OwlViTFeatureExtractor"""]
a_ : Any = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
a_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 445
| 1
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def A (__lowerCamelCase :str , __lowerCamelCase :Any , __lowerCamelCase :str=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
_lowerCAmelCase = nn.Parameter(__lowerCamelCase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
_lowerCAmelCase = nn.Parameter(__lowerCamelCase )
def A (__lowerCamelCase :Dict , __lowerCamelCase :Optional[int] , __lowerCamelCase :Tuple ):
# set torch weights for 1-to-1 comparison
_lowerCAmelCase = np.asarray(weights[0] )
_lowerCAmelCase = np.asarray(weights[1] )
_lowerCAmelCase = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCamelCase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCamelCase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowerCamelCase ).view(-1 , __lowerCamelCase ).contiguous().transpose(0 , 1 ) , )
def A (__lowerCamelCase :Dict , __lowerCamelCase :List[str] , __lowerCamelCase :str ):
# set torch weights for 1-to-1 comparison
_lowerCAmelCase = np.asarray(weights[0] )
_lowerCAmelCase = np.asarray(weights[1] )
_lowerCAmelCase = np.asarray(weights[2] )
_lowerCAmelCase = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCamelCase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCamelCase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCamelCase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowerCamelCase ).view(-1 , __lowerCamelCase ).contiguous().transpose(0 , 1 ) , )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Dict , __lowerCamelCase :Any ):
# layernorm 1
_lowerCAmelCase = weights[0][0][0]
_lowerCAmelCase = np.asarray(layer_norm_a[0] )
_lowerCAmelCase = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__lowerCamelCase ) , torch.tensor(__lowerCamelCase ) , )
# lsh weights + output
_lowerCAmelCase = weights[0][1]
if len(__lowerCamelCase ) < 4:
set_layer_weights_in_torch_lsh(__lowerCamelCase , torch_block.attention , __lowerCamelCase )
else:
set_layer_weights_in_torch_local(__lowerCamelCase , torch_block.attention , __lowerCamelCase )
    # intermediate weights
_lowerCAmelCase = weights[2][0][1][2]
# Chunked Feed Forward
if len(__lowerCamelCase ) == 4:
_lowerCAmelCase = intermediate_weights[2]
# layernorm 2
_lowerCAmelCase = np.asarray(intermediate_weights[0][0] )
_lowerCAmelCase = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__lowerCamelCase ) , torch.tensor(__lowerCamelCase ) , )
# intermediate dense
_lowerCAmelCase = np.asarray(intermediate_weights[1][0] )
_lowerCAmelCase = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__lowerCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowerCamelCase ) , )
# intermediate out
_lowerCAmelCase = np.asarray(intermediate_weights[4][0] )
_lowerCAmelCase = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__lowerCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowerCamelCase ) , )
def A (__lowerCamelCase :List[str] , __lowerCamelCase :Optional[Any] , __lowerCamelCase :Dict ):
# reformer model
_lowerCAmelCase = torch_model.reformer
# word embeds
_lowerCAmelCase = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowerCamelCase ) , )
if isinstance(weights[3] , __lowerCamelCase ):
_lowerCAmelCase = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_lowerCAmelCase = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'{position_embeddings[emb_idx]} emb does not match'
_lowerCAmelCase = nn.Parameter(torch.tensor(__lowerCamelCase ) )
_lowerCAmelCase = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__lowerCamelCase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_lowerCAmelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# output layer norm
_lowerCAmelCase = np.asarray(weights[7][0] )
_lowerCAmelCase = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__lowerCamelCase ) , torch.tensor(__lowerCamelCase ) , )
# output embeddings
_lowerCAmelCase = np.asarray(weights[9][0] )
_lowerCAmelCase = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__lowerCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowerCamelCase ) , )
def A (__lowerCamelCase :List[str] , __lowerCamelCase :List[Any] , __lowerCamelCase :Tuple ):
# Initialise PyTorch model
_lowerCAmelCase = ReformerConfig.from_json_file(__lowerCamelCase )
print(f'Building PyTorch model from configuration: {config}' )
_lowerCAmelCase = ReformerModelWithLMHead(__lowerCamelCase )
with open(__lowerCamelCase , """rb""" ) as f:
_lowerCAmelCase = pickle.load(__lowerCamelCase )["""weights"""]
set_model_weights_in_torch(__lowerCamelCase , __lowerCamelCase , config.hidden_size )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , __lowerCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowercase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 5
|
'''simple docstring'''
A_ = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A_ = frozenset(["prompt", "negative_prompt"])
A_ = frozenset([])
A_ = frozenset(["image"])
A_ = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["image"])
A_ = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
A_ = frozenset(["prompt", "image", "negative_prompt"])
A_ = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
A_ = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
A_ = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["image", "mask_image"])
A_ = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["example_image", "image", "mask_image"])
A_ = frozenset(["class_labels"])
A_ = frozenset(["class_labels"])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
A_ = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A_ = frozenset(["prompt", "negative_prompt"])
A_ = frozenset(["input_tokens"])
A_ = frozenset(["input_tokens"])
| 143
| 0
|
import random
from .binary_exp_mod import bin_exp_mod
def lowercase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1_000 ) -> Any:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
_snake_case : List[str] = n - 1
_snake_case : Tuple = 0
while d % 2 == 0:
        d //= 2  # integer halving keeps d a valid integer exponent
exp += 1
# n - 1=d*(2**exp)
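    # e.g. for n = 221 the loop above yields d = 55 and exp = 2, since 220 = 55 * 2**2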
_snake_case : Dict = 0
while count < prec:
_snake_case : int = random.randint(2 , n - 1 )
_snake_case : str = bin_exp_mod(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if b != 1:
_snake_case : Optional[int] = True
for _ in range(SCREAMING_SNAKE_CASE__ ):
if b == n - 1:
_snake_case : List[Any] = False
break
_snake_case : Union[str, Any] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
a__ = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 700
|
from math import ceil, sqrt
def lowercase ( SCREAMING_SNAKE_CASE__ : int = 1_000_000 ) -> int:
_snake_case : Tuple = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
_snake_case : int = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
_snake_case : Any = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
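# Worked example: the smallest lamina is a 3x3 square with a 1x1 hole, using
# 3**2 - 1**2 = 8 tiles; for each outer width the loop counts every hole width of the
# same parity whose tile count stays within the limit.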
if __name__ == "__main__":
print(F'''{solution() = }''')
| 198
| 0
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
A_ = logging.get_logger(__name__)
# General docstring
A_ = """ResNetConfig"""
# Base docstring
A_ = """microsoft/resnet-50"""
A_ = [1, 2048, 7, 7]
# Image classification docstring
A_ = """microsoft/resnet-50"""
A_ = """tiger cat"""
A_ = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __lowerCamelCase ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 3 , UpperCAmelCase = 1 , UpperCAmelCase = "relu" ):
super().__init__()
lowerCamelCase_ = nn.Convad(
UpperCAmelCase , UpperCAmelCase , kernel_size=UpperCAmelCase , stride=UpperCAmelCase , padding=kernel_size // 2 , bias=UpperCAmelCase )
lowerCamelCase_ = nn.BatchNormad(UpperCAmelCase )
lowerCamelCase_ = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = self.convolution(UpperCAmelCase )
lowerCamelCase_ = self.normalization(UpperCAmelCase )
lowerCamelCase_ = self.activation(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( nn.Module ):
def __init__( self , UpperCAmelCase ):
super().__init__()
lowerCamelCase_ = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
lowerCamelCase_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
lowerCamelCase_ = config.num_channels
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                '''Make sure that the channel dimension of the pixel values matches the one set in the configuration.''' )
lowerCamelCase_ = self.embedder(UpperCAmelCase )
lowerCamelCase_ = self.pooler(UpperCAmelCase )
return embedding
class __lowerCamelCase ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 2 ):
super().__init__()
lowerCamelCase_ = nn.Convad(UpperCAmelCase , UpperCAmelCase , kernel_size=1 , stride=UpperCAmelCase , bias=UpperCAmelCase )
lowerCamelCase_ = nn.BatchNormad(UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = self.convolution(UpperCAmelCase )
lowerCamelCase_ = self.normalization(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , UpperCAmelCase = "relu" ):
super().__init__()
lowerCamelCase_ = in_channels != out_channels or stride != 1
lowerCamelCase_ = (
ResNetShortCut(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
lowerCamelCase_ = nn.Sequential(
ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) , ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , activation=UpperCAmelCase ) , )
lowerCamelCase_ = ACTaFN[activation]
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = hidden_state
lowerCamelCase_ = self.layer(UpperCAmelCase )
lowerCamelCase_ = self.shortcut(UpperCAmelCase )
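        # residual connection: add the block's input back before the final activation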
hidden_state += residual
lowerCamelCase_ = self.activation(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , UpperCAmelCase = "relu" , UpperCAmelCase = 4 ):
super().__init__()
lowerCamelCase_ = in_channels != out_channels or stride != 1
lowerCamelCase_ = out_channels // reduction
lowerCamelCase_ = (
ResNetShortCut(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
lowerCamelCase_ = nn.Sequential(
ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , kernel_size=1 ) , ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase ) , ResNetConvLayer(UpperCAmelCase , UpperCAmelCase , kernel_size=1 , activation=UpperCAmelCase ) , )
lowerCamelCase_ = ACTaFN[activation]
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = hidden_state
lowerCamelCase_ = self.layer(UpperCAmelCase )
lowerCamelCase_ = self.shortcut(UpperCAmelCase )
hidden_state += residual
lowerCamelCase_ = self.activation(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 2 , UpperCAmelCase = 2 , ):
super().__init__()
lowerCamelCase_ = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
lowerCamelCase_ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase , activation=config.hidden_act ) , *[layer(UpperCAmelCase , UpperCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = input
for layer in self.layers:
lowerCamelCase_ = layer(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( nn.Module ):
def __init__( self , UpperCAmelCase ):
super().__init__()
lowerCamelCase_ = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowerCamelCase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(UpperCAmelCase , config.depths[1:] ):
self.stages.append(ResNetStage(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , depth=UpperCAmelCase ) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = True ):
lowerCamelCase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCamelCase_ = hidden_states + (hidden_state,)
lowerCamelCase_ = stage_module(UpperCAmelCase )
if output_hidden_states:
lowerCamelCase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=UpperCAmelCase , hidden_states=UpperCAmelCase , )
class __lowerCamelCase ( lowerCAmelCase ):
a__: Tuple = ResNetConfig
a__: Tuple = 'resnet'
a__: List[Any] = 'pixel_values'
a__: Union[str, Any] = True
def UpperCAmelCase__ ( self , UpperCAmelCase ):
if isinstance(UpperCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=False ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = value
A_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
A_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare ResNet model outputting raw features without any specific head on top.' , lowerCAmelCase , )
class __lowerCamelCase ( lowerCAmelCase ):
def __init__( self , UpperCAmelCase ):
super().__init__(UpperCAmelCase )
lowerCamelCase_ = config
lowerCamelCase_ = ResNetEmbeddings(UpperCAmelCase )
lowerCamelCase_ = ResNetEncoder(UpperCAmelCase )
lowerCamelCase_ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ):
lowerCamelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ = self.embedder(UpperCAmelCase )
lowerCamelCase_ = self.encoder(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase )
lowerCamelCase_ = encoder_outputs[0]
lowerCamelCase_ = self.pooler(UpperCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase , pooler_output=UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , lowerCAmelCase , )
class __lowerCamelCase ( lowerCAmelCase ):
def __init__( self , UpperCAmelCase ):
super().__init__(UpperCAmelCase )
lowerCamelCase_ = config.num_labels
lowerCamelCase_ = ResNetModel(UpperCAmelCase )
# classification head
lowerCamelCase_ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase__ ( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ):
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ = self.resnet(UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase )
lowerCamelCase_ = outputs.pooler_output if return_dict else outputs[1]
lowerCamelCase_ = self.classifier(UpperCAmelCase )
lowerCamelCase_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCamelCase_ = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCamelCase_ = '''single_label_classification'''
else:
lowerCamelCase_ = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowerCamelCase_ = MSELoss()
if self.num_labels == 1:
lowerCamelCase_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCamelCase_ = loss_fct(UpperCAmelCase , UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
lowerCamelCase_ = CrossEntropyLoss()
lowerCamelCase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCamelCase_ = BCEWithLogitsLoss()
lowerCamelCase_ = loss_fct(UpperCAmelCase , UpperCAmelCase )
if not return_dict:
lowerCamelCase_ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , lowerCAmelCase , )
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase ):
def __init__( self , UpperCAmelCase ):
super().__init__(UpperCAmelCase )
super()._init_backbone(UpperCAmelCase )
lowerCamelCase_ = [config.embedding_size] + config.hidden_sizes
lowerCamelCase_ = ResNetEmbeddings(UpperCAmelCase )
lowerCamelCase_ = ResNetEncoder(UpperCAmelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@replace_return_docstrings(output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ):
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase_ = self.embedder(UpperCAmelCase )
lowerCamelCase_ = self.encoder(UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
lowerCamelCase_ = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=UpperCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=UpperCAmelCase , )
| 29
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85
| 0
|
"""simple docstring"""
import operator as op
def snake_case__ ( _lowerCamelCase ) ->Optional[int]:
"""simple docstring"""
__lowercase : Optional[Any] = []
__lowercase : Any = lambda _lowerCamelCase, _lowerCamelCase : int(x / y ) # noqa: E731 integer division operation
__lowercase : str = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ), "Action".center(12 ), "Stack", sep=" | " )
print("-" * (30 + len(_lowercase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(_lowercase ) # append x to stack
# output in tabular format
print(x.rjust(8 ), ("push(" + x + ")").ljust(12 ), ",".join(_lowercase ), sep=" | " )
else:
__lowercase : Optional[int] = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ), ("pop(" + b + ")").ljust(12 ), ",".join(_lowercase ), sep=" | " )
__lowercase : Tuple = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ), ("pop(" + a + ")").ljust(12 ), ",".join(_lowercase ), sep=" | " )
stack.append(
str(opr[x](int(_lowercase ), int(_lowercase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ), ("push(" + a + x + b + ")").ljust(12 ), ",".join(_lowercase ), sep=" | ", )
return int(stack[0] )
if __name__ == "__main__":
__A : List[str] = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
| 704
|
"""simple docstring"""
def snake_case__ ( _lowerCamelCase ) ->int:
"""simple docstring"""
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(_lowerCamelCase, _lowerCamelCase ):
raise TypeError("Input value must be a 'int' type" )
return bin(_lowerCamelCase ).count("1" )
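# Example: bin(25) == '0b11001', so the function returns 3.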
if __name__ == "__main__":
import doctest
doctest.testmod()
| 281
| 0
|
'''simple docstring'''
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( ):
from torch.utils.cpp_extension import load
_A = Path(__snake_case ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
_A = [
root / filename
for filename in [
'vision.cpp',
os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
]
]
load(
'MultiScaleDeformableAttention' , __snake_case , with_cuda=__snake_case , extra_include_paths=[str(__snake_case )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 107
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
UpperCAmelCase_ = random.Random()
def lowerCAmelCase_ ( __UpperCAmelCase: List[str] , __UpperCAmelCase: Dict=1.0 , __UpperCAmelCase: str=None , __UpperCAmelCase: Tuple=None ) -> int:
if rng is None:
UpperCamelCase__ : List[Any] = global_rng
UpperCamelCase__ : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__=7, __magic_name__=400, __magic_name__=2000, __magic_name__=2048, __magic_name__=128, __magic_name__=1, __magic_name__=512, __magic_name__=30, __magic_name__=44100, ) -> str:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = parent
UpperCamelCase__ : str = batch_size
UpperCamelCase__ : Dict = min_seq_length
UpperCamelCase__ : Dict = max_seq_length
UpperCamelCase__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ : Optional[int] = spectrogram_length
UpperCamelCase__ : Union[str, Any] = feature_size
UpperCamelCase__ : int = num_audio_channels
UpperCamelCase__ : List[Any] = hop_length
UpperCamelCase__ : Union[str, Any] = chunk_length
UpperCamelCase__ : Any = sampling_rate
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCamelCase__ ( self, __magic_name__=False, __magic_name__=False ) -> Tuple:
"""simple docstring"""
def _flatten(__magic_name__ ):
return list(itertools.chain(*__magic_name__ ) )
if equal_length:
UpperCamelCase__ : Optional[int] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ : Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ : Tuple = [np.asarray(__magic_name__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a : str = TvltFeatureExtractor
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : str = TvltFeatureExtractionTester(self )
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__magic_name__, '''spectrogram_length''' ) )
self.assertTrue(hasattr(__magic_name__, '''feature_size''' ) )
self.assertTrue(hasattr(__magic_name__, '''num_audio_channels''' ) )
self.assertTrue(hasattr(__magic_name__, '''hop_length''' ) )
self.assertTrue(hasattr(__magic_name__, '''chunk_length''' ) )
self.assertTrue(hasattr(__magic_name__, '''sampling_rate''' ) )
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 253
| 0
|
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Checks whether number is prime in O(sqrt(n)) trial divisions."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
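# Illustrative behaviour of the helpers above (comments only, not part of the
# original module):
#   is_prime(13)              -> True
#   is_prime(12)              -> False
#   next_prime(14)            -> 17  (scans upward)
#   next_prime(14, desc=True) -> 13  (scans downward)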
| 719
|
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 568
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of image embeddings so they can be
    scaled to (and unscaled from) a normalized latent space.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
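# A minimal usage sketch (the batch shape is chosen for illustration only):
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   scaled = normalizer.scale(torch.randn(4, 768))
#   restored = normalizer.unscale(scaled)  # inverts scale() up to float error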
| 463
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
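# Note: the try/except blocks above let this package be imported even when torch
# or flax is missing, and _LazyModule defers the heavy submodule imports until an
# attribute such as GPTNeoModel is first accessed.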
| 463
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    """
    Wraps an image processor and a tokenizer into a single processor.
    (Class name inferred from the attribute layout; the original was obfuscated.)
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 492
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowercase__ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 492
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 547
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns all primes below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Counts hybrid integers p**q * q**p (p, q distinct primes) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
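# Why log2: a hybrid integer p**q * q**p stays below base**degree exactly when
# q * log2(p) + p * log2(q) <= degree * log2(base), which is the condition the
# inner while-loop checks without ever forming the astronomically large powers.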
if __name__ == "__main__":
print(F"{solution() = }")
| 547
| 1
|
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Takes the original LDM state dict and a config, and returns a converted checkpoint."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 577
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Finds scores of each token being the start and end token for an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
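# Shape sketch (dimensions assumed for illustration): each p_start/p_end row
# above is a softmax over the support tokens selected by the start/end token
# masks, i.e. a distribution over candidate entity boundaries for one query.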
| 577
| 1
|
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 100
|
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(number: int = 200) -> int:
    return two_pound(number)
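# Sanity check: solution(200) counts the ways to make 200 pence (2 pounds) from
# standard UK coins and equals 73682 (Project Euler problem 31).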
if __name__ == "__main__":
print(solution(int(input().strip())))
| 100
| 1
|
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
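# Illustrative round trip (keyword chosen arbitrarily):
#   cipher_map = create_cipher_map("MANGO")
#   decipher(encipher("HELLO WORLD", cipher_map), cipher_map) == "HELLO WORLD"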
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 383
|
import numpy as np
class Cell:
    """
    Class cell represents a cell in the world which has the properties:
    position, parent, and the costs g, h and f.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Returns the in-bounds neighbours of cell."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """Implementation of A*-style search over the gridworld."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
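# Note on the search above: g grows by 1 per step while h is the *squared*
# Euclidean distance to the goal, so this demo trades strict A* admissibility
# for a cheap heuristic that is fine on the small 5x5 grid below.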
if __name__ == "__main__":
UpperCamelCase = Gridworld()
# Start position and goal
UpperCamelCase = Cell()
UpperCamelCase = (0, 0)
UpperCamelCase = Cell()
UpperCamelCase = (4, 4)
print(f'''path from {start.position} to {goal.position}''')
UpperCamelCase = astar(world, start, goal)
# Just for visual reasons.
for i in s:
UpperCamelCase = 1
print(world.w)
| 383
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : int = logging.get_logger(__name__)
lowercase : Tuple = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act="gelu", layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=1e-10, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")
        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
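# Illustrative composition (all values are the defaults shown above):
#   text_cfg = BlipTextConfig()
#   vision_cfg = BlipVisionConfig()
#   cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   cfg.to_dict()  # round-trips both sub-configs as plain dictionaries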
| 649
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 649
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717
|
def reverse_long_words(sentence: str) -> str:
    """Reverses every word longer than 4 characters in a sentence."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 76
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
a__ : int = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 368
| 1
|
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads the given file as bytes and returns them as a long string of bits.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """
    Decompresses the given data_bits using the Lempel-Ziv-Welch algorithm
    and returns the result as a string.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = 2
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
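# Note: the lexicon rebuild above fires exactly when the code index reaches a
# power of two, i.e. at the moment every existing code needs one more leading
# bit to stay unambiguous.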
def write_file_binary(to_write: str, file_path: str) -> None:
    """
    Writes the given to_write string (which should consist only of 0's and 1's)
    as bytes into the file.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """
    Removes the size prefix that the compressed file carries.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    # Removing the size bytes from the compressed file
    data_bits = data_bits[counter:]
    # Removing the variable-size prefix that encodes the size itself
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, decompresses it and writes the result to the destination file.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(decompressed, destination_path)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 718
|
'''simple docstring'''
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 483
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
def lowerCamelCase__ ( self :Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCamelCase__ = tokenizer_class.from_pretrained("microsoft/deberta-base" )
UpperCamelCase__ = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
UpperCamelCase__ = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ )
UpperCamelCase__ = [tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) for seq in encoding["input_ids"]]
# fmt: off
UpperCamelCase__ = {
"input_ids": [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCamelCase__ = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , lowerCamelCase_ )
for expected, decoded in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
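# The vocab/merges fixtures above drive a byte-level BPE in which "\u0120"
# marks a leading space and each merge rule ("\u0120 l", "e r", ...) fuses one
# adjacent symbol pair, lowest rank first. The toy merge loop below (simplified:
# it merges one leftmost occurrence per step, which is enough for short inputs)
# reproduces the "lower" tokenization the test above expects and shows how the
# space marker merges. Illustrative only, not the tokenizer's implementation.
def apply_merges(symbols, merges):
    ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
    while len(symbols) > 1:
        pairs = list(zip(symbols, symbols[1:]))
        best = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if best not in ranks:
            break
        i = pairs.index(best)
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2 :]
    return symbols


demo_merges = ["\u0120 l", "\u0120l o", "\u0120lo w", "e r"]
assert apply_merges(list("lower"), demo_merges) == ["l", "o", "w", "er"]
assert apply_merges(list("\u0120lower"), demo_merges) == ["\u0120low", "er"]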
| 516
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid: np.ndarray, source: tuple[int, int], destination: tuple[int, int], allow_diagonal: bool):
"""simple docstring"""
rows, cols = grid.shape
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
queue, visited = [(0, source)], set()
matrix = np.full((rows, cols), np.inf)
matrix[source] = 0
predecessors = np.empty((rows, cols), dtype=object)
predecessors[source] = None
while queue:
(dist, (x, y)) = heappop(queue)
if (x, y) in visited:
continue
visited.add((x, y))
if (x, y) == destination:
path = []
while (x, y) != source:
path.append((x, y))
x, y = predecessors[x, y]
path.append(source)  # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(dx)):
nx, ny = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
next_node = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(queue, (dist + 1, (nx, ny)))
matrix[nx, ny] = dist + 1
predecessors[nx, ny] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
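# Usage sketch for dijkstra() above; the demo grid is illustrative, not part of
# the original file. 1 marks a walkable cell and 0 a wall, so the shortest route
# from the top-left corner to the bottom-left corner must detour around the
# blocked middle row.
demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
demo_dist, demo_path = dijkstra(demo_grid, (0, 0), (2, 0), allow_diagonal=False)
assert demo_dist == 6
assert demo_path == [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]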
| 516
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
tokenizer_class = CanineTokenizer
test_rust_tokenizer = False
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
tokenizer = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return CanineTokenizer.from_pretrained("google/canine-s" )
def SCREAMING_SNAKE_CASE__ ( self , **lowerCamelCase ) -> CanineTokenizer:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase )
UpperCamelCase : List[str] = 10_24
return tokenizer
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
UpperCamelCase : Tuple = self.canine_tokenizer
UpperCamelCase : str = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
UpperCamelCase : int = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
UpperCamelCase : Optional[Any] = tokenizer(lowerCamelCase , padding=lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
UpperCamelCase : Optional[int] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : List[Any] = self.canine_tokenizer
UpperCamelCase : Any = ["Once there was a man.", "He wrote a test in HuggingFace Transformers."]
UpperCamelCase : str = tokenizer(lowerCamelCase , padding=lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , lowerCamelCase )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertIn("token_type_ids" , lowerCamelCase )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
UpperCamelCase : str = self.canine_tokenizer
UpperCamelCase : List[str] = [
"What's the weather?",
"It's about 25 degrees.",
]
UpperCamelCase : List[str] = tokenizer(
text_target=lowerCamelCase , max_length=32 , padding="max_length" , truncation=lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCamelCase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase : Tuple = tempfile.mkdtemp()
UpperCamelCase : Optional[Any] = " He is very happy, UNwant\u00E9d,running"
UpperCamelCase : Tuple = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
UpperCamelCase : str = tokenizer.__class__.from_pretrained(lowerCamelCase )
UpperCamelCase : Optional[int] = after_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
shutil.rmtree(lowerCamelCase )
UpperCamelCase : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase : List[Any] = tempfile.mkdtemp()
UpperCamelCase : Dict = " He is very happy, UNwant\u00E9d,running"
UpperCamelCase : Optional[int] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCamelCase : Tuple = chr(0xE0_07 )
additional_special_tokens.append(lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCamelCase : Tuple = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
UpperCamelCase : Tuple = tokenizer.__class__.from_pretrained(lowerCamelCase )
UpperCamelCase : str = after_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
self.assertIn(lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCamelCase : Optional[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCamelCase , UpperCamelCase : Tuple = self.get_clean_sequence(lowerCamelCase )
# a special token for Canine can be defined as follows:
UpperCamelCase : Optional[Any] = 0xE0_05
UpperCamelCase : List[Any] = chr(lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCamelCase : Tuple = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) , 1 )
UpperCamelCase : Optional[int] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCamelCase )
UpperCamelCase : Union[str, Any] = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
UpperCamelCase : str = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
UpperCamelCase : List[Any] = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(lowerCamelCase , input_encoded + special_token_id )
UpperCamelCase : Optional[int] = tokenizer.decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Dict = self.get_tokenizers(do_lower_case=lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SPECIAL_TOKEN_1 = chr(0xE0_05 )
SPECIAL_TOKEN_2 = chr(0xE0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCamelCase : Tuple = tokenizer.tokenize(lowerCamelCase )
UpperCamelCase : Union[str, Any] = tokenizer.tokenize(lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) , 1 )
self.assertEqual(len(lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , lowerCamelCase )
self.assertEqual(token_a[0] , lowerCamelCase )
@require_tokenizers
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
UpperCamelCase : str = self.get_tokenizers(do_lower_case=lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
UpperCamelCase : Optional[Any] = 0xE0_06
UpperCamelCase : List[Any] = chr(lowerCamelCase )
UpperCamelCase : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCamelCase )
tokenizer.from_pretrained(lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase )
with open(os.path.join(lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCamelCase : Optional[Any] = json.load(lowerCamelCase )
with open(os.path.join(lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCamelCase : Any = json.load(lowerCamelCase )
# a special token for Canine can be defined as follows:
UpperCamelCase : Any = 0xE0_06
UpperCamelCase : Any = chr(lowerCamelCase )
UpperCamelCase : Tuple = [new_token_a]
UpperCamelCase : Optional[int] = [new_token_a]
with open(os.path.join(lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowerCamelCase , lowerCamelCase )
with open(os.path.join(lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowerCamelCase , lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCamelCase : str = tokenizer_class.from_pretrained(lowerCamelCase , extra_ids=0 )
self.assertIn(lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCamelCase : Optional[int] = 0xE0_07
UpperCamelCase : Optional[Any] = chr(lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCamelCase : str = [AddedToken(lowerCamelCase , lstrip=lowerCamelCase )]
UpperCamelCase : Any = tokenizer_class.from_pretrained(
lowerCamelCase , additional_special_tokens=lowerCamelCase , extra_ids=0 )
self.assertIn(lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
UpperCamelCase : Dict = self.get_tokenizers(do_lower_case=lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
input = "hello world"
if self.space_between_special_tokens:
output = "[CLS] hello world [SEP]"
else:
output = input
UpperCamelCase : Any = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
UpperCamelCase : Union[str, Any] = tokenizer.decode(lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCamelCase , [output, output.lower()] )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
UpperCamelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCamelCase : Tuple = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCamelCase : int = "a"
UpperCamelCase : str = ord(lowerCamelCase )
for attr in attributes_list:
setattr(lowerCamelCase , attr + "_id" , lowerCamelCase )
self.assertEqual(getattr(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
self.assertEqual(getattr(lowerCamelCase , attr + "_id" ) , lowerCamelCase )
setattr(lowerCamelCase , attr + "_id" , lowerCamelCase )
self.assertEqual(getattr(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
self.assertEqual(getattr(lowerCamelCase , attr + "_id" ) , lowerCamelCase )
setattr(lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(lowerCamelCase , "additional_special_tokens_ids" ) , [] )
additional_special_token_id = 0xE0_06
additional_special_token = chr(additional_special_token_id )
setattr(lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
pass
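# Background sketch for the CANINE tests above. CANINE needs no subword vocab:
# every character is encoded as its Unicode code point, and special tokens sit
# in the private use area -- the 5_73_44 / 5_73_45 ids in the batch test above
# are 0xE000 (CLS) and 0xE001 (SEP), and the trailing zeros are padding. The
# toy encoder below shows the scheme; its truncate-then-pad rule is a
# simplifying assumption, not the real tokenizer's behaviour.
CLS, SEP, PAD = 0xE000, 0xE001, 0


def toy_canine_encode(text, max_length=None):
    ids = [CLS] + [ord(char) for char in text] + [SEP]
    if max_length is not None:
        ids = ids[:max_length] + [PAD] * max(0, max_length - len(ids))
    return ids


assert toy_canine_encode("Life") == [5_73_44, 76, 1_05, 1_02, 1_01, 5_73_45]
assert len(toy_canine_encode("Life", max_length=10)) == 10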
| 435
|
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
lowerCAmelCase_ = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
lowerCAmelCase_ = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
lowerCAmelCase_ = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
lowerCAmelCase_ = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
lowerCAmelCase_ = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
lowerCAmelCase_ = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def A__ ( ):
'''simple docstring'''
play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def A__ ( A : int = 1_00):
'''simple docstring'''
return (generate_random_hand() for _ in range(A))
@pytest.mark.parametrize("hand, expected" , A)
def A__ ( A : List[Any] , A : Union[str, Any]):
'''simple docstring'''
assert PokerHand(A)._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , A)
def A__ ( A : Any , A : Any):
'''simple docstring'''
assert PokerHand(A)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , A)
def A__ ( A : Optional[Any] , A : Any , A : Optional[int]):
'''simple docstring'''
UpperCamelCase : Dict = PokerHand(A)
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , A)
def A__ ( A : str , A : Any):
'''simple docstring'''
assert PokerHand(A)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , A)
def A__ ( A : str , A : Optional[int]):
'''simple docstring'''
assert PokerHand(A)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , A)
def A__ ( A : List[str] , A : Optional[int] , A : Dict):
'''simple docstring'''
assert PokerHand(A).compare_with(PokerHand(A)) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands())
def A__ ( A : List[str] , A : Optional[int] , A : str):
'''simple docstring'''
assert PokerHand(A).compare_with(PokerHand(A)) == expected
def A__ ( ):
'''simple docstring'''
poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
list_copy = poker_hands.copy()
shuffle(list_copy)
user_sorted = chain(sorted(list_copy))
for index, hand in enumerate(user_sorted):
assert hand == poker_hands[index]
def A__ ( ):
'''simple docstring'''
pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
pokerhands.sort(reverse=True)
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def A__ ( ):
'''simple docstring'''
pokerhand = PokerHand("2C 4S AS 3D 5C")
expected = True
expected_card_values = [5, 4, 3, 2, 14]
for _ in range(10):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def A__ ( ):
'''simple docstring'''
answer = 0
script_dir = os.path.abspath(os.path.dirname(__file__))
poker_hands = os.path.join(script_dir, "poker_hands.txt")
with open(poker_hands) as file_hand:
for line in file_hand:
player_hand = line[:14].strip()
opponent_hand = line[15:].strip()
player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
output = player.compare_with(opponent)
if output == "Win":
answer += 1
assert answer == 3_76
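# The random-hand generator above leans on SORTED_HANDS being ordered weakest
# to strongest, so comparing two indices decides the outcome. The indexing
# trick (play >= oppo) + (play > oppo) evaluates to 0, 1 or 2 and selects
# straight from ["Loss", "Tie", "Win"]; a quick standalone check:
def expected_result(play, oppo):
    return ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]


assert expected_result(3, 7) == "Loss"
assert expected_result(5, 5) == "Tie"
assert expected_result(9, 2) == "Win"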
| 435
| 1
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
'''simple docstring'''
def __init__(self, p_stop=0.01, max_length=10_00):
self.p_stop = p_stop
self.max_length = max_length
def __iter__(self):
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
stop = random.random() < self.p_stop
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self , a , a , a=False , a=True ) -> Optional[Any]:
snake_case_ = [
BatchSamplerShard(lowerCAmelCase_ , 2 , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
for i in range(2 )
]
snake_case_ = [list(lowerCAmelCase_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(lowerCAmelCase_ ) for shard in batch_sampler_shards] , [len(lowerCAmelCase_ ) for e in expected] )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def _UpperCamelCase ( self ) -> int:
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but
# still contains a full multiple of num_processes batches.
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size and
# does not contain a full multiple of num_processes batches.
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
def _UpperCamelCase ( self ) -> Union[str, Any]:
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
def _UpperCamelCase ( self ) -> int:
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but
# still contains a full multiple of num_processes batches.
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size and
# does not contain a full multiple of num_processes batches.
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
snake_case_ = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
def _UpperCamelCase ( self ) -> str:
snake_case_ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
snake_case_ = [BatchSamplerShard(lowerCAmelCase_ , 2 , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def _UpperCamelCase ( self , a , a , a , a=False , a=2 , a=False ) -> Tuple:
random.seed(lowerCAmelCase_ )
snake_case_ = list(lowerCAmelCase_ )
snake_case_ = [
IterableDatasetShard(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , drop_last=lowerCAmelCase_ , num_processes=lowerCAmelCase_ , process_index=lowerCAmelCase_ , split_batches=lowerCAmelCase_ , )
for i in range(lowerCAmelCase_ )
]
snake_case_ = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCAmelCase_ )
iterable_dataset_lists.append(list(lowerCAmelCase_ ) )
snake_case_ = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
snake_case_ = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
self.assertTrue(len(lowerCAmelCase_ ) % shard_batch_size == 0 )
snake_case_ = []
for idx in range(0 , len(lowerCAmelCase_ ) , lowerCAmelCase_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCAmelCase_ ) < len(lowerCAmelCase_ ):
reference += reference
self.assertListEqual(lowerCAmelCase_ , reference[: len(lowerCAmelCase_ )] )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = 42
snake_case_ = RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Edge case with a very small dataset
snake_case_ = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
def _UpperCamelCase ( self ) -> Tuple:
snake_case_ = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
snake_case_ = SkipBatchSampler(lowerCAmelCase_ , 2 )
self.assertListEqual(list(lowerCAmelCase_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = DataLoader(list(range(16 ) ) , batch_size=4 )
snake_case_ = skip_first_batches(lowerCAmelCase_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _UpperCamelCase ( self ) -> Union[str, Any]:
snake_case_ = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(lowerCAmelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def _UpperCamelCase ( self ) -> str:
Accelerator()
snake_case_ = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(lowerCAmelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
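# The BatchSamplerShard expectations above (with split_batches=False and
# even_batches=True) all follow one rule: samples are drawn from the dataset
# repeated cyclically until every process can be given the same number of full
# batches, and shard i then takes every num_processes-th batch starting at
# batch i. Below is a dependency-free model of that rule -- a simplified
# sketch, not accelerate's implementation.
from itertools import cycle, islice


def shard_batches(indices, batch_size, num_processes, process_index):
    per_round = batch_size * num_processes
    total = -(-len(indices) // per_round) * per_round  # ceil to a full round
    stream = list(islice(cycle(indices), total))
    batches = [stream[i : i + batch_size] for i in range(0, total, batch_size)]
    return batches[process_index::num_processes]


# Matches the expected shard contents asserted in the tests above:
assert shard_batches(list(range(21)), 3, 2, 1) == [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]]
assert shard_batches(list(range(2)), 3, 2, 0) == [[0, 1, 0]]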
| 198
|
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int):
"""simple docstring"""
max_face_number = sides_number
max_total = max_face_number * dice_number
totals_frequencies = [0] * (max_total + 1)
min_face_number = 1
faces_range = range(min_face_number, max_face_number + 1)
for dice_numbers in product(faces_range, repeat=dice_number):
total = sum(dice_numbers)
totals_frequencies[total] += 1
return totals_frequencies
def solution():
"""simple docstring"""
peter_totals_frequencies = total_frequency_distribution(
sides_number=4, dice_number=9)
colin_totals_frequencies = total_frequency_distribution(
sides_number=6, dice_number=6)
peter_wins_count = 0
min_peter_total = 9
max_peter_total = 4 * 9
min_colin_total = 6
for peter_total in range(min_peter_total, max_peter_total + 1):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total])
total_games_number = (4**9) * (6**6)
peter_win_probability = peter_wins_count / total_games_number
rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
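# An equivalent cross-check of the computation above (Project Euler 205) using
# collections.Counter instead of a frequency list; 0.5731441 is the published
# answer to that problem. Same arithmetic, illustrative reformulation.
from collections import Counter


def dice_totals(sides, dice):
    return Counter(sum(roll) for roll in product(range(1, sides + 1), repeat=dice))


peter, colin = dice_totals(4, 9), dice_totals(6, 6)
wins = sum(
    p_freq * c_freq
    for p_total, p_freq in peter.items()
    for c_total, c_freq in colin.items()
    if p_total > c_total
)
assert round(wins / (4**9 * 6**6), 7) == 0.5731441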
| 577
| 0
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def __lowerCAmelCase ( lowerCamelCase : Any ):
'''simple docstring'''
__lowerCAmelCase = list(s_dict.keys() )
for key in keys:
__lowerCAmelCase = r".*/layers_(\d+)"
__lowerCAmelCase = key
if re.match(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = re.sub(r"layers_(\d+)" , r"block/\1/layer" , lowerCamelCase )
__lowerCAmelCase = r"(encoder|decoder)\/"
if re.match(lowerCamelCase , lowerCamelCase ):
__lowerCAmelCase = re.match(lowerCamelCase , lowerCamelCase ).groups()
if groups[0] == "encoder":
__lowerCAmelCase = re.sub(r"/mlp/" , r"/1/mlp/" , lowerCamelCase )
__lowerCAmelCase = re.sub(r"/pre_mlp_layer_norm/" , r"/1/layer_norm/" , lowerCamelCase )
elif groups[0] == "decoder":
__lowerCAmelCase = re.sub(r"/mlp/" , r"/2/mlp/" , lowerCamelCase )
__lowerCAmelCase = re.sub(r"/pre_mlp_layer_norm/" , r"/2/layer_norm/" , lowerCamelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__lowerCAmelCase = new_key.replace(lowerCamelCase , lowerCamelCase )
print(f'''{key} -> {new_key}''' )
__lowerCAmelCase = s_dict.pop(lowerCamelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__lowerCAmelCase = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__lowerCAmelCase = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
num_experts = s_dict[key].shape[0]
expert_weights = s_dict[key]
for idx in range(num_experts):
s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
s_dict.pop(key)
return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def __lowerCAmelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
import regex as re
with open(lowerCamelCase , "r" ) as f:
__lowerCAmelCase = f.read()
__lowerCAmelCase = re.findall(r"(.*) = ([0-9.]*)" , lowerCamelCase )
__lowerCAmelCase = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__lowerCAmelCase = float(lowerCamelCase ) if "." in value else int(lowerCamelCase )
__lowerCAmelCase = re.findall(r"(.*activations) = \(\'(.*)\',\)" , lowerCamelCase )[0]
__lowerCAmelCase = str(activation[1] )
__lowerCAmelCase = num_experts
__lowerCAmelCase = SwitchTransformersConfig(**lowerCamelCase )
return config
def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : List[str]=None , lowerCamelCase : Tuple="./" , lowerCamelCase : Tuple=8 ):
'''simple docstring'''
print(f'''Loading flax weights from : {flax_checkpoint_path}''' )
flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
if gin_file is not None:
__lowerCAmelCase = convert_gin_to_config(lowerCamelCase , lowerCamelCase )
else:
__lowerCAmelCase = SwitchTransformersConfig.from_pretrained(lowerCamelCase )
__lowerCAmelCase = SwitchTransformersForConditionalGeneration(lowerCamelCase )
__lowerCAmelCase = flax_params["target"]
__lowerCAmelCase = flatten_dict(lowerCamelCase , sep="/" )
__lowerCAmelCase = rename_keys(lowerCamelCase )
__lowerCAmelCase = unflatten_dict(lowerCamelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCamelCase , lowerCamelCase )
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
lowerCAmelCase : Optional[int] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
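# The renaming pass above is regex-driven: T5X keys such as
# "encoder/layers_3/mlp/wi/kernel" become HF-style
# "encoder/block/3/layer/1/mlp/wi/kernel" before the MOE_LAYER_NAME_MAPPING
# substring table is applied. Below is a standalone distillation of just that
# layer-index rewrite, inferred from the patterns in the script above.
import re


def rename_layer_key(key: str) -> str:
    key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
    if key.startswith("encoder/"):
        key = re.sub(r"/mlp/", r"/1/mlp/", key)
        key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", key)
    elif key.startswith("decoder/"):
        key = re.sub(r"/mlp/", r"/2/mlp/", key)
        key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", key)
    return key


assert rename_layer_key("encoder/layers_3/mlp/wi/kernel") == "encoder/block/3/layer/1/mlp/wi/kernel"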
| 39
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = {"BertModelTest": "BertModelTester"}
__lowerCAmelCase = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
__lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
__lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
__lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
__lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
| 39
| 1
|
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_3_7_8_1_3_7.0
AXIS_B = 6_3_5_6_7_5_2.3_1_4_2_4_5
RADIUS = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
'''simple docstring'''
flattening = (AXIS_A - AXIS_B) / AXIS_A
phi_1 = atan((1 - flattening) * tan(radians(lat1)))
phi_2 = atan((1 - flattening) * tan(radians(lat2)))
lambda_1 = radians(lon1)
lambda_2 = radians(lon2)
# Equation
sin_sq_phi = sin((phi_2 - phi_1) / 2)
sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
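# Usage sketch for haversine_distance() above; the coordinates are approximate
# and only for illustration. San Francisco to Yosemite Valley comes out to
# roughly 254 km along the great circle.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
assert 250_000 < haversine_distance(*SAN_FRANCISCO, *YOSEMITE) < 260_000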
| 80
|
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def lowercase_ ( *A_ , **A_ ) -> Optional[int]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __magic_name__ ( unittest.TestCase ):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 353
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
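# --- Added usage sketch (not part of the original sample): a minimal, hedged example of
# driving the processor above with a random RGB array. The 224x224 output comes from the
# defaults in __init__; everything else is an assumption for illustration.
def _image_processor_demo():
    processor = CLIPImageProcessor()
    dummy_image = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
    batch = processor(images=dummy_image, return_tensors=None)
    print(batch["pixel_values"][0].shape)  # expected: (3, 224, 224) after resize + center crop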
| 591
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`.")
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.", )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.", )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.", )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it.")
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    '''simple docstring'''
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
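# --- Added usage sketch (not part of the original file): a hedged example exercising the
# parser in --debug mode so no `gcloud` subprocess is actually launched. The TPU name and
# zone are made-up values, and the sketch assumes no default accelerate config file exists.
def _tpu_config_demo():
    parser = tpu_command_parser()
    args = parser.parse_args(
        ["--command", "echo", "hello", "--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--debug"]
    )
    tpu_command_launcher(args)  # prints the gcloud command instead of running it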
| 591
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """simple docstring"""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x):
        """simple docstring"""
        return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
while i <= 100_000:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
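# --- Added convergence check (not part of the original sample): the trapezoidal rule
# should approach the exact integral of x^2 on [0, 3], which is 9, as steps grow.
def _trapezoid_demo():
    exact = 9.0
    for steps in (10, 100, 1000):
        approx = trapezoidal_area(lambda x: x * x, 0, 3, steps)
        print(steps, approx, abs(approx - exact))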
| 77
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 505
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Optional[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
])
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
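# --- Added illustration (not part of the original script): how read_in_q_k_v() slices a
# fused timm qkv projection into separate query/key/value tensors. The tiny hidden size
# is an assumption for demonstration only.
def _qkv_split_demo():
    hidden_size = 4
    in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
        3 * hidden_size, hidden_size
    )
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    # Stacking the three slices back together recovers the fused matrix.
    assert torch.equal(torch.cat([query, key, value]), in_proj_weight)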
| 721
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 191
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
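# --- Added illustration (not part of the original module): shape_list() mixes static and
# dynamic dimensions, and flatten() mirrors torch.flatten for a rank-3 tensor.
def _tf_utils_demo():
    x = tf.zeros((2, 3, 4))
    print(shape_list(x))                 # [2, 3, 4]
    print(shape_list(np.zeros((5, 6))))  # [5, 6]
    y = flatten(x, start_dim=1)          # collapse the last two axes
    print(y.shape)                       # (2, 12)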
| 227
|
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
UpperCamelCase__ = input('''Enter Video/IGTV url: ''').strip()
UpperCamelCase__ = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.")
| 227
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
"""simple docstring"""
A_ = {'''input_ids''': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `A_` is the expected-encoding dict built above; it is passed through unchanged.
        self.tokenizer_integration_test_util(
            expected_encoding=A_,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
        source_text = "Tämä on testi"
        target_text = "This is a test"
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 563
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__lowercase = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
__lowercase = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
__lowercase = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 563
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class _snake_case :
__A : torch.Tensor # [batch_size x 3]
__A : torch.Tensor # [batch_size x 3]
__A : torch.Tensor # [batch_size x 3]
__A : torch.Tensor # [batch_size x 3]
__A : int
__A : int
__A : float
__A : float
__A : Tuple[int]
def UpperCamelCase__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] ,dtype=np.floataa ) )
def UpperCamelCase__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] ,dtype=np.floataa ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = torch.arange(self.height * self.width )
UpperCAmelCase_ : Tuple = torch.stack(
[
pixel_indices % self.width,
torch.div(_snake_case ,self.width ,rounding_mode="trunc" ),
] ,axis=1 ,)
return coords
@property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , *UpperCAmelCase_ : Optional[int] = self.shape
UpperCAmelCase_ : Dict = int(np.prod(_snake_case ) )
UpperCAmelCase_ : Optional[int] = self.get_image_coords()
UpperCAmelCase_ : List[Any] = torch.broadcast_to(coords.unsqueeze(0 ) ,[batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Dict = self.get_camera_rays(_snake_case )
UpperCAmelCase_ : int = rays.view(_snake_case ,inner_batch_size * self.height * self.width ,2 ,3 )
return rays
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : Dict = coords.view(_snake_case ,-1 ,2 )
UpperCAmelCase_ : Optional[Any] = self.resolution()
UpperCAmelCase_ : Optional[Any] = self.fov()
UpperCAmelCase_ : Optional[int] = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : str = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : str = fracs.view(_snake_case ,-1 ,2 )
UpperCAmelCase_ : Optional[Any] = (
self.z.view(_snake_case ,1 ,3 )
+ self.x.view(_snake_case ,1 ,3 ) * fracs[:, :, :1]
+ self.y.view(_snake_case ,1 ,3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : List[Any] = directions / directions.norm(dim=-1 ,keepdim=_snake_case )
UpperCAmelCase_ : str = torch.stack(
[
torch.broadcast_to(self.origin.view(_snake_case ,1 ,3 ) ,[batch_size, directions.shape[1], 3] ),
directions,
] ,dim=2 ,)
return rays.view(_snake_case ,*_snake_case ,2 ,3 )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin ,x=self.x ,y=self.y ,z=self.z ,width=_snake_case ,height=_snake_case ,x_fov=self.x_fov ,y_fov=self.y_fov ,)
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Union[str, Any] = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
UpperCAmelCase_ : Tuple = np.array([np.sin(_SCREAMING_SNAKE_CASE ), np.cos(_SCREAMING_SNAKE_CASE ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : Optional[Any] = -z * 4
UpperCAmelCase_ : List[str] = np.array([np.cos(_SCREAMING_SNAKE_CASE ), -np.sin(_SCREAMING_SNAKE_CASE ), 0.0] )
UpperCAmelCase_ : Dict = np.cross(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
origins.append(_SCREAMING_SNAKE_CASE )
xs.append(_SCREAMING_SNAKE_CASE )
ys.append(_SCREAMING_SNAKE_CASE )
zs.append(_SCREAMING_SNAKE_CASE )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(_SCREAMING_SNAKE_CASE , axis=0 ) ).float() , x=torch.from_numpy(np.stack(_SCREAMING_SNAKE_CASE , axis=0 ) ).float() , y=torch.from_numpy(np.stack(_SCREAMING_SNAKE_CASE , axis=0 ) ).float() , z=torch.from_numpy(np.stack(_SCREAMING_SNAKE_CASE , axis=0 ) ).float() , width=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , x_fov=0.7 , y_fov=0.7 , shape=(1, len(_SCREAMING_SNAKE_CASE )) , )
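# NOTE: a minimal, de-obfuscated sketch of the pixel-to-ray math above (identifier
# names are mine; the sample itself uses placeholder names). Pixel indices are mapped
# to fractions in [-1, 1], scaled by tan(fov / 2), and the directions are normalized.
import torch

def pixel_ray_directions(width: int, height: int, x_fov: float, y_fov: float) -> torch.Tensor:
    idx = torch.arange(height * width)
    # (H*W, 2) pixel coordinates, x varying fastest, matching get_image_coords above
    coords = torch.stack([idx % width, torch.div(idx, width, rounding_mode="trunc")], dim=1).float()
    res = torch.tensor([width, height], dtype=torch.float32)
    fov = torch.tensor([x_fov, y_fov], dtype=torch.float32)
    fracs = ((coords / (res - 1)) * 2 - 1) * torch.tan(fov / 2)
    # identity camera pose: z is the view axis, x/y span the image plane
    z = torch.tensor([0.0, 0.0, 1.0])
    x = torch.tensor([1.0, 0.0, 0.0])
    y = torch.tensor([0.0, 1.0, 0.0])
    directions = z + x * fracs[:, :1] + y * fracs[:, 1:]
    return directions / directions.norm(dim=-1, keepdim=True)

assert pixel_ray_directions(64, 64, 0.7, 0.7).shape == (64 * 64, 3)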
| 71
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A = logging.get_logger(__name__)
A = {"""vocab_file""": """spiece.model"""}
A = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
A = {"""bert_for_seq_generation""": 512}
class a__ ( __magic_name__ ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = []
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ):
"""simple docstring"""
__UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
# Pass the special tokens through to the base class
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Dict = vocab_file
__UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(UpperCamelCase_)
@property
def a_ ( self : List[str]):
"""simple docstring"""
return self.sp_model.get_piece_size()
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.__dict__.copy()
__UpperCAmelCase : List[Any] = None
return state
def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def a_ ( self : Any , UpperCamelCase_ : str):
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_)
def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
return self.sp_model.piece_to_id(UpperCamelCase_)
def a_ ( self : Tuple , UpperCamelCase_ : int):
"""simple docstring"""
__UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_)
return token
def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : int = []
__UpperCAmelCase : Tuple = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase_) + token
__UpperCAmelCase : List[Any] = []
else:
current_sub_tokens.append(UpperCamelCase_)
out_string += self.sp_model.decode(UpperCamelCase_)
return out_string.strip()
def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__UpperCAmelCase : Tuple = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCamelCase_ , "wb") as fi:
__UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_)
return (out_vocab_file,)
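# NOTE: the class above mirrors transformers' BertGenerationTokenizer; a quick usage
# sketch, assuming the public checkpoint referenced in the vocab map above is reachable.
from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
ids = tokenizer("A sentence to encode.").input_ids
print(tokenizer.convert_ids_to_tokens(ids))  # SentencePiece pieces such as '▁A', '▁sentence', ...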
| 77
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_ ( lowerCAmelCase ):
__lowerCamelCase : UNetaDModel
__lowerCamelCase : ScoreSdeVeScheduler
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
super().__init__()
self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )
@torch.no_grad()
def __call__( self , __lowerCAmelCase = 1 , __lowerCAmelCase = 2_000 , __lowerCAmelCase = None , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , **__lowerCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.unet.config.sample_size
SCREAMING_SNAKE_CASE_ : Any = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE_ : List[Any] = self.unet
SCREAMING_SNAKE_CASE_ : Optional[Any] = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase ) * self.scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ : Tuple = sample.to(self.device )
self.scheduler.set_timesteps(__lowerCAmelCase )
self.scheduler.set_sigmas(__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE_ : List[str] = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
SCREAMING_SNAKE_CASE_ : int = self.scheduler.step_correct(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
# prediction step
SCREAMING_SNAKE_CASE_ : List[Any] = model(__lowerCAmelCase , __lowerCAmelCase ).sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.scheduler.step_pred(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = output.prev_sample, output.prev_sample_mean
SCREAMING_SNAKE_CASE_ : str = sample_mean.clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ : Any = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__lowerCAmelCase )
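# NOTE: this matches diffusers' ScoreSdeVePipeline; a usage sketch (2_000 steps is the
# default above, and "google/ncsnpp-church-256" is one published NCSN++ checkpoint).
import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
image = pipe(num_inference_steps=2_000, generator=torch.manual_seed(0)).images[0]
image.save("sde_ve_sample.png")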
| 311
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCAmelCase ):
__lowerCamelCase : Optional[int] = ComputeEnvironment.AMAZON_SAGEMAKER
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : Union[str, Any] = 'ml.p3.2xlarge'
__lowerCamelCase : Optional[int] = 'accelerate_sagemaker_execution_role'
__lowerCamelCase : int = 'hf-sm'
__lowerCamelCase : Any = 'us-east-1'
__lowerCamelCase : Tuple = 1
__lowerCamelCase : str = 'accelerate-sagemaker-1'
__lowerCamelCase : Optional[Any] = '1.6'
__lowerCamelCase : Tuple = '4.4'
__lowerCamelCase : List[str] = 'train.py'
__lowerCamelCase : Optional[int] = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
__lowerCamelCase : str = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class snake_case_ ( unittest.TestCase ):
def __A ( self ):
# `_convert_nargs_to_dict` should cast each training-script arg to its proper type.
SCREAMING_SNAKE_CASE_ : List[str] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['model_name_or_path'] , __lowerCAmelCase )
assert isinstance(converted_args['do_train'] , __lowerCAmelCase )
assert isinstance(converted_args['epochs'] , __lowerCAmelCase )
assert isinstance(converted_args['learning_rate'] , __lowerCAmelCase )
assert isinstance(converted_args['max_steps'] , __lowerCAmelCase )
with pytest.raises(__lowerCAmelCase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 311
| 1
|
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ = 50_000_000 ):
__lowerCamelCase : Dict = set()
__lowerCamelCase : Union[str, Any] = int((limit - 24) ** (1 / 2) )
__lowerCamelCase : List[Any] = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , SCREAMING_SNAKE_CASE__ ) ) )
for primea in primes:
__lowerCamelCase : Tuple = primea * primea
for primea in primes:
__lowerCamelCase : Tuple = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
__lowerCamelCase : List[Any] = primea * primea * primea * primea
__lowerCamelCase : int = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE__ )
return len(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669
|
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
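# NOTE: worked example with approximate textbook figures of my own choosing: fresh
# water (density ~998 kg/m^3, bulk modulus ~2.15 GPa) gives c = sqrt(K / rho) ~ 1468 m/s.
print(round((2.15e9 / 998) ** 0.5, 1))  # 1467.8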
| 669
| 1
|
'''simple docstring'''
from typing import Any
class __lowerCAmelCase:
def __init__( self : int , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[str] = data
SCREAMING_SNAKE_CASE_ :int = None
class __lowerCAmelCase:
def __init__( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Tuple = None
def _lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[str] = self.head
while temp is not None:
print(temp.data , end=' ' )
SCREAMING_SNAKE_CASE_ :Optional[int] = temp.next
print()
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[Any] = Node(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Dict = self.head
SCREAMING_SNAKE_CASE_ :Optional[int] = new_node
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
SCREAMING_SNAKE_CASE_ :str = self.head
while node_a is not None and node_a.data != node_data_a:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = node_a.next
SCREAMING_SNAKE_CASE_ :Optional[int] = self.head
while node_a is not None and node_a.data != node_data_a:
SCREAMING_SNAKE_CASE_ :Any = node_a.next
if node_a is None or node_a is None:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[Any] = node_a.data, node_a.data
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
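# NOTE: the swap above exchanges node *data* rather than relinking nodes, which keeps
# the pointer logic trivial at the cost of an O(n) scan. The same idea with ordinary
# names (a sketch of mine, not the original class):
class _Node:
    def __init__(self, data):
        self.data = data
        self.next = None

def swap_data(head, a, b):
    """Swap the payloads of the first nodes holding a and b; O(n) time, O(1) space."""
    if a == b:
        return
    node_a = node_b = None
    current = head
    while current is not None and (node_a is None or node_b is None):
        if current.data == a:
            node_a = current
        elif current.data == b:
            node_b = current
        current = current.next
    if node_a is not None and node_b is not None:
        node_a.data, node_b.data = node_b.data, node_a.data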
| 233
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ :Tuple = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
SCREAMING_SNAKE_CASE_ :int = DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ :Tuple = str(SCREAMING_SNAKE_CASE )
dataset_info.write_to_directory(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = DatasetInfo.from_directory(SCREAMING_SNAKE_CASE )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE , 'dataset_info.json' ) )
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE_ :Tuple = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
SCREAMING_SNAKE_CASE_ :Dict = dataset_info._to_yaml_dict()
assert sorted(SCREAMING_SNAKE_CASE ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
SCREAMING_SNAKE_CASE_ :int = yaml.safe_dump(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = yaml.safe_load(SCREAMING_SNAKE_CASE )
assert dataset_info_yaml_dict == reloaded
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE_ :List[Any] = DatasetInfo()
SCREAMING_SNAKE_CASE_ :Optional[int] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ :str = str(SCREAMING_SNAKE_CASE )
dataset_infos_dict.write_to_directory(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE )
# the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
SCREAMING_SNAKE_CASE_ :List[Any] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
SCREAMING_SNAKE_CASE_ :Optional[Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE , 'README.md' ) )
| 233
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase : Union[str, Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
UpperCAmelCase = state_dict.pop(lowerCamelCase_ )
UpperCAmelCase = val
def lowerCamelCase_(lowerCamelCase_ ) -> List[Any]:
UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
UpperCAmelCase = value
else:
UpperCAmelCase = value
return new_state_dict
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_=False ) -> List[str]:
UpperCAmelCase = ""
if is_panoptic:
UpperCAmelCase = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
UpperCAmelCase = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[:256, :]
UpperCAmelCase = in_proj_bias[:256]
UpperCAmelCase = in_proj_weight[256:512, :]
UpperCAmelCase = in_proj_bias[256:512]
UpperCAmelCase = in_proj_weight[-256:, :]
UpperCAmelCase = in_proj_bias[-256:]
def lowerCamelCase_() -> int:
UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
UpperCAmelCase = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase = "resnet101"
if "dc5" in model_name:
UpperCAmelCase = True
UpperCAmelCase = "panoptic" in model_name
if is_panoptic:
UpperCAmelCase = 250
else:
UpperCAmelCase = 91
UpperCAmelCase = "huggingface/label-files"
UpperCAmelCase = "coco-detection-id2label.json"
UpperCAmelCase = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase = "coco_panoptic" if is_panoptic else "coco_detection"
UpperCAmelCase = ConditionalDetrImageProcessor(format=lowerCamelCase_ )
# prepare image
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCamelCase_ , return_tensors="pt" )
UpperCAmelCase = encoding["pixel_values"]
logger.info(F'Converting model {model_name}...' )
# load original model from torch hub
UpperCAmelCase = torch.hub.load("DeppMeng/ConditionalDETR" , lowerCamelCase_ , pretrained=lowerCamelCase_ ).eval()
UpperCAmelCase = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase = "conditional_detr." + src
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
UpperCAmelCase = rename_backbone_keys(lowerCamelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase_ , is_panoptic=lowerCamelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
UpperCAmelCase = state_dict.pop(lowerCamelCase_ )
UpperCAmelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase = state_dict.pop(lowerCamelCase_ )
UpperCAmelCase = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
UpperCAmelCase = state_dict.pop(lowerCamelCase_ )
UpperCAmelCase = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
UpperCAmelCase = state_dict.pop(lowerCamelCase_ )
UpperCAmelCase = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase = ConditionalDetrForSegmentation(lowerCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
model.push_to_hub(repo_id=lowerCamelCase_ , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
UpperCAmelCase = conditional_detr(lowerCamelCase_ )
UpperCAmelCase = model(lowerCamelCase_ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 )
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCamelCase : Dict = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 323
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class __magic_name__ ( A__ ):
lowercase : Union[str, Any] ='''mra'''
def __init__( self : Tuple , UpperCamelCase__ : Any=5_02_65 , UpperCamelCase__ : int=7_68 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Any=12 , UpperCamelCase__ : Tuple=30_72 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : str=5_12 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : List[Any]=1e-5 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : str=4 , UpperCamelCase__ : Tuple="full" , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Optional[int]=2 , **UpperCamelCase__ : int , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = type_vocab_size
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = block_per_row
UpperCAmelCase = approx_mode
UpperCAmelCase = initial_prior_first_n_blocks
UpperCAmelCase = initial_prior_diagonal_n_blocks
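# NOTE: standard config-driven instantiation (assumes a transformers version that
# ships the MRA model; weights here are randomly initialized, not pretrained).
from transformers import MraConfig, MraModel

model = MraModel(MraConfig(hidden_size=768, num_hidden_layers=12))
print(model.config.block_per_row)  # 4, the default above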
| 323
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : Optional[int] = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __magic_name__ ( _UpperCamelCase ):
UpperCamelCase : List[str] = "beit"
def __init__( self , __magic_name__=8_1_9_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=2_2_4 , __magic_name__=1_6 , __magic_name__=3 , __magic_name__=False , __magic_name__=False , __magic_name__=False , __magic_name__=False , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=True , __magic_name__=[3, 5, 7, 1_1] , __magic_name__=[1, 2, 3, 6] , __magic_name__=True , __magic_name__=0.4 , __magic_name__=2_5_6 , __magic_name__=1 , __magic_name__=False , __magic_name__=2_5_5 , **__magic_name__ , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = use_mask_token
_lowerCAmelCase = use_absolute_position_embeddings
_lowerCAmelCase = use_relative_position_bias
_lowerCAmelCase = use_shared_relative_position_bias
_lowerCAmelCase = layer_scale_init_value
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase = out_indices
_lowerCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase = use_auxiliary_head
_lowerCAmelCase = auxiliary_loss_weight
_lowerCAmelCase = auxiliary_channels
_lowerCAmelCase = auxiliary_num_convs
_lowerCAmelCase = auxiliary_concat_input
_lowerCAmelCase = semantic_loss_ignore_index
class __magic_name__ ( _UpperCamelCase ):
UpperCamelCase : Union[str, Any] = version.parse("1.11" )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return 1e-4
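# NOTE: the same config/model split applies here; a minimal instantiation sketch
# (random weights; "microsoft/beit-base-patch16-224-pt22k" above is the pretrained one).
from transformers import BeitConfig, BeitModel

model = BeitModel(BeitConfig(image_size=224, patch_size=16))
print(model.config.vocab_size)  # 8192, the default above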
| 309
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
a__ : List[str] = """docs/source/en/_toctree.yml"""
def A__ ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = defaultdict(__lowerCamelCase )
for doc in model_doc:
counts[doc["local"]] += 1
_lowerCAmelCase = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase = []
for duplicate_key in duplicates:
_lowerCAmelCase = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(__lowerCamelCase ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(__lowerCamelCase, key=lambda __lowerCamelCase : s["title"].lower() )
def A__ ( __lowerCamelCase=False ):
"""simple docstring"""
with open(__lowerCamelCase, encoding='utf-8' ) as f:
_lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase = content[api_idx]['sections']
# Then to the model doc
_lowerCAmelCase = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
_lowerCAmelCase = api_doc[model_idx]['sections']
_lowerCAmelCase = [(idx, section) for idx, section in enumerate(__lowerCamelCase ) if 'sections' in section]
_lowerCAmelCase = False
for idx, modality_doc in modalities_docs:
_lowerCAmelCase = modality_doc['sections']
_lowerCAmelCase = clean_model_doc_toc(__lowerCamelCase )
if old_modality_doc != new_modality_doc:
_lowerCAmelCase = True
if overwrite:
_lowerCAmelCase = new_modality_doc
if diff:
if overwrite:
_lowerCAmelCase = model_doc
_lowerCAmelCase = api_doc
with open(__lowerCamelCase, 'w', encoding='utf-8' ) as f:
f.write(yaml.dump(__lowerCamelCase, allow_unicode=__lowerCamelCase ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a__ : str = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
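# NOTE: the core of clean_model_doc_toc is "collapse duplicate `local` keys, then sort
# by title". A standalone toy demo of that behavior (simplified: it assumes duplicates
# share a title, whereas the script raises a ValueError when they differ):
toc = [
    {"local": "bert", "title": "BERT"},
    {"local": "bert", "title": "BERT"},
    {"local": "albert", "title": "ALBERT"},
]
seen = set()
deduped = [d for d in toc if d["local"] not in seen and not seen.add(d["local"])]
print(sorted(deduped, key=lambda d: d["title"].lower()))
# [{'local': 'albert', 'title': 'ALBERT'}, {'local': 'bert', 'title': 'BERT'}]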
| 309
| 1
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __lowercase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=3_0 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1_0 , lowercase_=0.02 , lowercase_=3 , lowercase_=None , lowercase_=2 , ) -> List[Any]:
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = scope
__snake_case = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 2
def _a ( self) -> Tuple:
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = self.get_config()
return config, pixel_values, labels
def _a ( self) -> Any:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]:
__snake_case = DeiTModel(config=_lowercase)
model.to(_lowercase)
model.eval()
__snake_case = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = DeiTForMaskedImageModeling(config=_lowercase)
model.to(_lowercase)
model.eval()
__snake_case = model(_lowercase)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
__snake_case = 1
__snake_case = DeiTForMaskedImageModeling(_lowercase)
model.to(_lowercase)
model.eval()
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__snake_case = model(_lowercase)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> List[Any]:
__snake_case = self.type_sequence_label_size
__snake_case = DeiTForImageClassification(_lowercase)
model.to(_lowercase)
model.eval()
__snake_case = model(_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__snake_case = 1
__snake_case = DeiTForImageClassification(_lowercase)
model.to(_lowercase)
model.eval()
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__snake_case = model(_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _a ( self) -> int:
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
__UpperCAmelCase = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> int:
__snake_case = DeiTModelTester(self)
__snake_case = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=3_7)
def _a ( self) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds')
def _a ( self) -> Optional[Any]:
pass
def _a ( self) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(_lowercase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear))
def _a ( self) -> Dict:
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(_lowercase)
__snake_case = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowercase)
def _a ( self) -> str:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase)
def _a ( self) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase)
def _a ( self) -> int:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase)
def _a ( self , lowercase_ , lowercase_ , lowercase_=False) -> int:
__snake_case = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self) -> List[Any]:
if not self.model_tester.is_training:
return
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowercase)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
__snake_case = model_class(_lowercase)
model.to(_lowercase)
model.train()
__snake_case = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
__snake_case = model(**_lowercase).loss
loss.backward()
def _a ( self) -> Optional[int]:
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__snake_case = False
__snake_case = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowercase) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
__snake_case = model_class(_lowercase)
model.gradient_checkpointing_enable()
model.to(_lowercase)
model.train()
__snake_case = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
__snake_case = model(**_lowercase).loss
loss.backward()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowercase),
*get_values(_lowercase),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}"):
__snake_case = problem_type["title"]
__snake_case = problem_type["num_labels"]
__snake_case = model_class(_lowercase)
model.to(_lowercase)
model.train()
__snake_case = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
if problem_type["num_labels"] > 1:
__snake_case = inputs["labels"].unsqueeze(1).repeat(1 , problem_type['num_labels'])
__snake_case = inputs["labels"].to(problem_type['dtype'])
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowercase) as warning_list:
__snake_case = model(**_lowercase).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}")
loss.backward()
@slow
def _a ( self) -> Any:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = DeiTModel.from_pretrained(_lowercase)
self.assertIsNotNone(_lowercase)
def A ( ) -> Any:
'''simple docstring'''
__snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def _a ( self) -> int:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
if is_vision_available()
else None
)
@slow
def _a ( self) -> int:
__snake_case = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to(
_lowercase)
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=_lowercase , return_tensors='pt').to(_lowercase)
# forward pass
with torch.no_grad():
__snake_case = model(**_lowercase)
# verify the logits
__snake_case = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , _lowercase)
__snake_case = torch.tensor([-1.0266, 0.1912, -1.2861]).to(_lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def _a ( self) -> List[Any]:
__snake_case = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto')
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=_lowercase , return_tensors='pt')
__snake_case = inputs.pixel_values.to(_lowercase)
# forward pass to make sure inference works in fp16
with torch.no_grad():
__snake_case = model(_lowercase)
| 313
|
"""simple docstring"""
from random import randint, random
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: int , lowerCamelCase_: int , lowerCamelCase_: int , lowerCamelCase_: bool = False , lowerCamelCase_: bool = False , lowerCamelCase_: int = 5 , ):
"""simple docstring"""
snake_case : str = [[-1] * number_of_cells] # Create a highway without any car
snake_case : str = 0
snake_case : Any = max(lowerCamelCase_ , 0 )
while i < number_of_cells:
snake_case : Optional[int] = (
randint(0 , lowerCamelCase_ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: list , lowerCamelCase_: int ):
"""simple docstring"""
snake_case : Any = 0
snake_case : int = highway_now[car_index + 1 :]
for cell in range(len(lowerCamelCase_ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowerCamelCase_ , -1 )
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: list , lowerCamelCase_: float , lowerCamelCase_: int ):
"""simple docstring"""
snake_case : List[str] = len(lowerCamelCase_ )
# Before calculations, the highway is empty
snake_case : Dict = [-1] * number_of_cells
for car_index in range(lowerCamelCase_ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
snake_case : Tuple = min(highway_now[car_index] + 1 , lowerCamelCase_ )
# Number of empty cells before the next car
snake_case : int = get_distance(lowerCamelCase_ , lowerCamelCase_ ) - 1
# We can't have the car causing an accident
snake_case : str = min(next_highway[car_index] , lowerCamelCase_ )
if random() < probability:
# Randomly, a driver will slow down
snake_case : Optional[Any] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: list , lowerCamelCase_: int , lowerCamelCase_: float , lowerCamelCase_: int ):
"""simple docstring"""
snake_case : Optional[Any] = len(highway[0] )
for i in range(lowerCamelCase_ ):
snake_case : Union[str, Any] = update(highway[i] , lowerCamelCase_ , lowerCamelCase_ )
snake_case : Optional[int] = [-1] * number_of_cells
for car_index in range(lowerCamelCase_ ):
snake_case : List[Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
snake_case : Optional[Any] = (car_index + speed) % number_of_cells
# Commit the change of position
snake_case : Tuple = speed
highway.append(lowerCamelCase_ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
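# NOTE: because every function in this sample shares one mangled name, here is a
# self-contained restatement of the Nagel-Schreckenberg update it implements
# (accelerate, brake to the gap, randomly dawdle, then move; -1 marks an empty cell):
from random import random

def nasch_step(road: list, max_speed: int = 5, p_slow: float = 0.3) -> list:
    n = len(road)
    nxt = [-1] * n
    for i, v in enumerate(road):
        if v == -1:
            continue
        v = min(v + 1, max_speed)  # accelerate
        gap = next(d for d in range(1, n + 1) if road[(i + d) % n] != -1) - 1
        v = min(v, gap)  # brake so we never reach the car ahead
        if v > 0 and random() < p_slow:  # random dawdling
            v -= 1
        nxt[(i + v) % n] = v  # move (circular road)
    return nxt

road = [0, -1, -1, 2, -1, -1, -1, 1, -1, -1]
for _ in range(3):
    road = nasch_step(road)
print(road)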
| 449
| 0
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase , unittest.TestCase):
_a = None
_a = BloomTokenizerFast
_a = BloomTokenizerFast
_a = True
_a = False
_a = '''tokenizer_file'''
_a = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def SCREAMING_SNAKE_CASE ( self: Tuple ):
super().setUp()
lowercase :List[str] = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self: List[str] , **_lowerCAmelCase: Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
lowercase :List[str] = self.get_rust_tokenizer()
lowercase :Optional[int] = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
lowercase :Dict = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
lowercase :int = tokenizer.batch_encode_plus(_lowerCAmelCase )["input_ids"]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
lowercase :Tuple = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Dict , _lowerCAmelCase: int=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase :Dict = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase :Any = "This is a simple input"
lowercase :Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
lowercase :Tuple = ("This is a simple input", "This is a pair")
lowercase :List[str] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.batch_encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.encode(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.batch_encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
lowercase :Dict = None # Hotfixing padding = None
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" , )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase :Dict = self.get_rust_tokenizer()
lowercase :int = load_dataset("xnli" , "all_languages" , split="test" , streaming=_lowerCAmelCase )
lowercase :Tuple = next(iter(_lowerCAmelCase ) )["premise"] # pick up one data
lowercase :Optional[Any] = list(sample_data.values() )
lowercase :Optional[int] = list(map(tokenizer.encode , _lowerCAmelCase ) )
lowercase :List[str] = [tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) for x in output_tokens]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Any ):
# The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not impose
# any sequence length constraints. The parent class's test would fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
_UpperCAmelCase : Union[str, Any] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
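# --- Added usage sketch (not part of the package __init__ above) ---
# A minimal training-loop sketch using the public API re-exported by this module.
# `model`, `optimizer` and `dataloader` are placeholders the caller must supply;
# the Accelerator calls shown are part of accelerate's documented API.
#
# from accelerate import Accelerator
#
# accelerator = Accelerator()
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
# for batch in dataloader:
#     optimizer.zero_grad()
#     loss = model(**batch).loss
#     accelerator.backward(loss)  # replaces loss.backward()
#     optimizer.step()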
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : List[str] = "▁"
SCREAMING_SNAKE_CASE : List[Any] = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE : int = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
SCREAMING_SNAKE_CASE : Optional[int] = {
"google/pegasus-xsum": 512,
}
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
class PegasusTokenizer ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self, _lowercase, _lowercase="<pad>", _lowercase="</s>", _lowercase="<unk>", _lowercase="<mask_2>", _lowercase="<mask_1>", _lowercase=None, _lowercase=103, _lowercase = None, **_lowercase, ) -> None:
SCREAMING_SNAKE_CASE_ = offset
if additional_special_tokens is not None:
if not isinstance(_lowercase, _lowercase ):
raise TypeError(
f"""additional_special_tokens should be of type {type(_lowercase )}, but is"""
f""" {type(_lowercase )}""" )
SCREAMING_SNAKE_CASE_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(_lowercase ), self.offset - 1 )
]
if len(set(_lowercase ) ) != len(_lowercase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
SCREAMING_SNAKE_CASE_ = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2, self.offset )]
SCREAMING_SNAKE_CASE_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowercase, unk_token=_lowercase, mask_token=_lowercase, pad_token=_lowercase, mask_token_sent=_lowercase, offset=_lowercase, additional_special_tokens=_lowercase, sp_model_kwargs=self.sp_model_kwargs, **_lowercase, )
SCREAMING_SNAKE_CASE_ = mask_token_sent
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
# add special tokens to encoder dict
        self.encoder = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1 )} )
        self.decoder = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + self.offset
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self, d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self, text ) -> List[str]:
        return self.sp_model.encode(text, out_type=str )
    def _convert_token_to_id( self, token ) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        # sentencepiece ids are shifted by `offset` to make room for the special tokens above
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token( self, index ) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
            return token
    def convert_tokens_to_string( self, tokens ) -> str:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def num_special_tokens_to_add( self, pair=False ) -> int:
        return 1
    def _special_token_mask( self, seq ) -> List[int]:
        all_special_ids = set(self.all_special_ids ) # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self, token_ids_a, token_ids_b = None, already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a )
        elif token_ids_b is None:
            return self._special_token_mask(token_ids_a ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_b ) + [1]
    def build_inputs_with_special_tokens( self, token_ids_a, token_ids_b=None ) -> List[int]:
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
    def save_vocabulary( self, save_directory, filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file, 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
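# --- Added usage sketch (not part of the tokenizer module above) ---
# Minimal round trip with the tokenizer defined above. The checkpoint name comes
# from PRETRAINED_VOCAB_FILES_MAP at the top of this file; the exact token ids
# produced are checkpoint-dependent and shown only as an illustration.
#
# tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
# ids = tok("A short test sentence.")["input_ids"]   # ends with eos_token_id == 1
# text = tok.decode(ids, skip_special_tokens=True)   # round-trips back to the input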
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SCREAMING_SNAKE_CASE : int = get_tests_dir("fixtures")
SCREAMING_SNAKE_CASE : str = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SCREAMING_SNAKE_CASE : str = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ) -> None:
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut( self ) -> None:
        config = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(config, WavaVecaFeatureExtractor )
    def test_feature_extractor_from_local_directory_from_key( self ) -> None:
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        self.assertIsInstance(config, WavaVecaFeatureExtractor )
    def test_feature_extractor_from_local_directory_from_config( self ) -> None:
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR ).to_dict()
            config_dict.pop('feature_extractor_type' )
            config = WavaVecaFeatureExtractor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            config = AutoFeatureExtractor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(config, WavaVecaFeatureExtractor )
    def test_feature_extractor_from_local_file( self ) -> None:
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG )
        self.assertIsInstance(config, WavaVecaFeatureExtractor )
    def test_repo_not_found( self ) -> None:
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier' ):
            feature_extractor = AutoFeatureExtractor.from_pretrained('bert-base' )
    def test_revision_not_found( self ) -> None:
        with self.assertRaisesRegex(
            EnvironmentError, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa' )
    def test_feature_extractor_not_found( self ) -> None:
        with self.assertRaisesRegex(
            EnvironmentError, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ):
            feature_extractor = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
    def test_from_pretrained_dynamic_feature_extractor( self ) -> None:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False )
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True )
        self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir )
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
    def test_new_feature_extractor_registration( self ) -> None:
        try:
            AutoConfig.register('custom', CustomConfig )
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoFeatureExtractor.register(WavaVecaConfig, WavaVecaFeatureExtractor )
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir )
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict( self ) -> None:
        class NewFeatureExtractor ( WavaVecaFeatureExtractor ):
            is_local = True
        try:
            AutoConfig.register('custom', CustomConfig )
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor )
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
            self.assertTrue(not hasattr(feature_extractor, 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
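# --- Added usage sketch (not part of the test module above) ---
# The register pattern exercised by the tests above, shown standalone. CustomConfig
# and CustomFeatureExtractor are the test fixtures imported at the top of this
# file; the save path is illustrative.
#
# AutoConfig.register("custom", CustomConfig)
# AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
# fe = CustomFeatureExtractor()
# fe.save_pretrained("my-custom-fe")
# fe = AutoFeatureExtractor.from_pretrained("my-custom-fe")  # resolves to CustomFeatureExtractor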
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory :
    def __init__( self ) -> None:
        '''simple docstring'''
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor( self ) -> None:
        '''simple docstring'''
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
    def start( self ) -> None:
        '''simple docstring'''
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()
    def stop( self ) -> int:
        '''simple docstring'''
        self.peak_monitoring = False
self.thread.join()
return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure( start_measures ):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f'''{i}-peak'''] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures
def log_measures( measures , description ):
    print(f'''{description}:''' )
    print(f'''- Time: {measures["time"]:.2f}s''' )
    for i in range(torch.cuda.device_count() ):
        print(f'''- GPU {i} allocated: {measures[str(i )]:.2f}MiB''' )
        peak = measures[f'''{i}-peak''']
        print(f'''- GPU {i} peak: {peak:.2f}MiB''' )
    print(f'''- CPU RAM allocated: {measures["cpu"]:.2f}MiB''' )
    print(f'''- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB''' )
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn dataset dict into (features, targets)
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    '''simple docstring'''
    # NOTE: the hparam attribute names below follow the upstream TAPAS conversion script.
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f'Task {task} not supported.' )
    print(f'Building PyTorch model from configuration: {config}' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f'Save tokenizer files to {pytorch_dump_path}' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
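# --- Added usage note (not part of the conversion script above) ---
# Illustrative invocation; the script filename and all paths are placeholders:
#
# python convert_tapas_checkpoint.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/tapas_config.json \
#     --pytorch_dump_path /path/to/output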
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ) -> None:
        '''simple docstring'''
        self.model_tester = FlaxDistilBertModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""distilbert-base-uncased""" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxDistilBertModelIntegrationTest ( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding( self ):
        '''simple docstring'''
        model = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width( height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
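# Worked example for downscale_height_and_width (added comment, not in the original):
# with scale_factor=8, 512 -> 512 // 64 = 8, then 8 * 8 = 64 on the latent side,
# while 520 -> ceil(520 / 64) = 9, then 9 * 8 = 72, i.e. non-multiples are rounded up.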
class KandinskyVaaPipeline ( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , unet: UNetaDConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
UpperCAmelCase_ = torch.device(F"""cuda:{gpu_id}""" )
UpperCAmelCase_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowercase , __lowercase )
    def enable_model_cpu_offload( self , gpu_id=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
UpperCAmelCase_ = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=__lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ = cpu_offload_with_hook(__lowercase , __lowercase , prev_module_hook=__lowercase )
# We'll offload the last model manually.
UpperCAmelCase_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowercase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        '''simple docstring'''
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum( number: int ) -> int:
    if not isinstance(number , int ):
        raise TypeError("""Parameter number must be int""" )
    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""" )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution( chain_length: int = 60 , number_limit: int = 1000000 ) -> int:
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError("""Parameters chain_length and number_limit must be int""" )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""" )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution()}")
"""simple docstring"""
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key( message: str , key: str ) -> str:
    # Repeats the key in a cyclic manner until it matches the message length.
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def cipher_text( message: str , key_new: str ) -> str:
    # Encrypts the message by subtracting key indices mod 26.
    cipher = """"""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher += dict2[x]
    return cipher
def original_text( cipher: str , key_new: str ) -> str:
    # Decrypts by adding the key indices back mod 26.
    or_txt = """"""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
def main() -> None:
    message = """THE GERMAN ATTACK"""
    key = """SECRET"""
    key_new = generate_key(message , key )
    s = cipher_text(message , key_new )
    print(F"Encrypted Text = {s}" )
    print(F"Original Text = {original_text(s , key_new )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
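# Worked example (added comment): with the key stretched to the message length,
# generate_key("THE GERMAN ATTACK", "SECRET") == "SECRETSECRETSECRE"
# cipher_text("THE GERMAN ATTACK", "SECRETSECRETSECRE") == "BDC PAYUWL JPAIYI"
# original_text("BDC PAYUWL JPAIYI", "SECRETSECRETSECRE") == "THE GERMAN ATTACK"
# (e.g. the first letter: T(19) - S(18) = 1 -> "B"; spaces pass through unchanged)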
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Union[str, Any] = logging.get_logger(__name__)
A: Optional[int] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig ( PretrainedConfig ):
    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=128112 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=1024 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=128 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
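# --- Added usage sketch (not part of the configuration module above) ---
# Constructing a small config from the class above; the overridden values are
# illustrative, everything else falls back to the defaults in __init__.
#
# config = NllbMoeConfig(num_experts=8, expert_capacity=32, encoder_sparse_step=2)
# print(config.d_model, config.num_experts)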
'''simple docstring'''
from __future__ import annotations
def check_polygon( nums: list[float] ) -> bool:
    if len(nums ) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
    if any(i <= 0 for i in nums ):
        raise ValueError("All values must be greater than 0" )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
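# Worked example (added comment): check_polygon([6, 10, 5]) is True because the
# longest side (10) is shorter than the sum of the others (6 + 5 = 11), while
# check_polygon([3, 7, 13, 2]) is False since 13 >= 3 + 7 + 2.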
'''simple docstring'''
def is_palindrome( n: int ) -> bool:
    return str(n ) == str(n )[::-1]
def sum_reverse( n: int ) -> int:
    return int(n ) + int(str(n )[::-1] )
def solution( limit: int = 10_000 ) -> int:
    lychrel_nums = []
    for num in range(1 , limit ):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a )
            iterations += 1
            if is_palindrome(a ):
                break
        else:
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
print(F'''{solution() = }''')
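# Worked example (added comment): 349 becomes palindromic in three iterations
# (349 + 943 = 1292, 1292 + 2921 = 4213, 4213 + 3124 = 7337), so it is not a
# Lychrel candidate; 196 never converges within fifty iterations and is counted.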
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300 # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc: float , acceptor_conc: float , intrinsic_conc: float , ) -> float:
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
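# Worked example (added comment): for donor_conc = acceptor_conc = 1e17 and
# intrinsic_conc = 1e10 (all in cm^-3), kT/q at 300 K is about 0.0259 V and
# ln(1e17 * 1e17 / 1e20) = ln(1e14) ~ 32.2, giving a built-in voltage of ~0.83 V.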
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester :
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        """simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MraModelTest ( ModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason="MRA does not output attentions" )
    def test_attention_outputs( self ):
        """simple docstring"""
        return
@require_torch
class MraModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm_long_input( self ):
        """simple docstring"""
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
        input_ids = torch.arange(4096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    # NOTE: the target module paths below follow the upstream s3prl conversion script.
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model
def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : int , snake_case_ : Dict ) -> Union[str, Any]:
__snake_case = UniSpeechSatForAudioFrameClassification.from_pretrained(snake_case_ , config=snake_case_ )
__snake_case = downstream_dict['''model.linear.weight''']
__snake_case = downstream_dict['''model.linear.bias''']
return model
def lowerCamelCase__ ( snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : int ) -> Optional[int]:
__snake_case = UniSpeechSatForXVector.from_pretrained(snake_case_ , config=snake_case_ )
__snake_case = downstream_dict['''connector.weight''']
__snake_case = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__snake_case = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
__snake_case = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__snake_case = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : str , snake_case_ : str , snake_case_ : List[str] ) -> Union[str, Any]:
__snake_case = torch.load(snake_case_ , map_location='''cpu''' )
__snake_case = checkpoint['''Downstream''']
__snake_case = UniSpeechSatConfig.from_pretrained(snake_case_ )
__snake_case = WavaVecaFeatureExtractor.from_pretrained(
snake_case_ , return_attention_mask=snake_case_ , do_normalize=snake_case_ )
__snake_case = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__snake_case = convert_classification(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__snake_case = convert_diarization(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith('''ForXVector''' ):
__snake_case = convert_xvector(snake_case_ , snake_case_ , snake_case_ )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
__snake_case = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(snake_case_ )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
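
# Illustrative sketch (not part of the original script): the converter above
# routes a checkpoint to a head-specific loader by matching the suffix of the
# configured architecture name. The toy `_demo_dispatch` below is a
# hypothetical stand-in that only demonstrates the dispatch pattern.
def _demo_dispatch(arch: str) -> str:
    converters = {
        "ForSequenceClassification": "classification",
        "ForAudioFrameClassification": "diarization",
        "ForXVector": "xvector",
    }
    for suffix, kind in converters.items():
        if arch.endswith(suffix):
            return kind
    raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")


# _demo_dispatch("UniSpeechSatForXVector") -> "xvector"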
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to store the converted model.")
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
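
# Illustrative sketch (not part of the original script): assign_to_checkpoint
# essentially rewrites checkpoint keys from the LDM layout to the diffusers
# layout using "old"/"new" substring pairs. The toy state dict below is
# hypothetical and demonstrates only that renaming step.
_demo_state = {"down.0.block.0.conv.weight": 1.0, "down.0.block.1.conv.weight": 2.0}
_demo_meta_path = {"old": "down.0.block", "new": "down_blocks.0.resnets"}
_demo_renamed = {
    key.replace(_demo_meta_path["old"], _demo_meta_path["new"]): value for key, value in _demo_state.items()
}
# _demo_renamed == {"down_blocks.0.resnets.0.conv.weight": 1.0, "down_blocks.0.resnets.1.conv.weight": 2.0}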
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
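
if __name__ == "__main__":
    # Minimal sketch of what debug_launcher does for the CPU tests above: run a
    # callable across CPU worker processes (assumes accelerate is installed;
    # `_demo_worker` is a hypothetical stand-in for the real test entry point).
    def _demo_worker():
        print("hello from a CPU worker")

    debug_launcher(_demo_worker, num_processes=2)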
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
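
# Rough sketch of the deferred-import idea behind _LazyModule (illustration
# only, not the transformers implementation): look an attribute up in an
# import table and import the owning module on first access. The demo names
# below are hypothetical.
import importlib

_DEMO_IMPORT_STRUCTURE = {"math": ["sqrt"]}


def _demo_lazy_getattr(name):
    # Resolve `name` through the table, importing the owning module lazily.
    for module_name, attrs in _DEMO_IMPORT_STRUCTURE.items():
        if name in attrs:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)


# _demo_lazy_getattr("sqrt") imports `math` on first use and returns math.sqrt.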
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
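
# Quick sanity check of the vocab construction in create_vocab_dict, using
# in-memory lines instead of a fairseq dict file (illustrative only).
_demo_lines = ["hello 12", "world 7"]
_demo_words = [line.split(" ")[0] for line in _demo_lines]
_demo_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_demo_vocab.update(dict(zip(_demo_words, range(4, len(_demo_words) + 4))))
# _demo_vocab == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}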
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
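
if __name__ == "__main__":
    # Minimal sketch of the rope_scaling configuration exercised by
    # test_model_rope_scaling above (assumes torch and transformers are
    # installed; the tiny hyperparameters below are hypothetical).
    demo_config = LlamaConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    demo_config.rope_scaling = {"type": "linear", "factor": 10.0}  # or {"type": "dynamic", "factor": ...}
    demo_model = LlamaModel(demo_config)  # RoPE positions are rescaled by the factor
    print(demo_model.config.rope_scaling)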
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """
    Double Linked List Node built specifically for LRU Cache
    """

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """
    Double Linked List built specifically for LRU Cache
    """

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """
        Adds the given node to the end of the list (before rear)
        """
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """
        Removes and returns the given node from the list, or None if absent
        """
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """
    LRU Cache to store a given capacity of key/value pairs
    """

    # class variable to map the decorated functions to their respective instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """
        Returns the value for the input key and updates the Double Linked List.
        """
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """
        Sets the value for the input key and updates the Double Linked List.
        """
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """
        Decorator version of LRU Cache
        """

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
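

# Example use of the decorator defined above: memoize a unary function and
# inspect the cache statistics through the attached cache_info() helper.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


# fib(100) is fast thanks to memoization; fib.cache_info() reports
# CacheInfo(hits=..., misses=..., capacity=100, current size=...).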
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
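
# Toy check (illustrative only) of the entity-vocab flattening performed by
# load_original_entity_vocab: one original entry yields one "language:name"
# key per language, all pointing at the same entity id.
_demo_entry = {"id": 7, "entities": [["Japan", "en"], ["日本", "ja"]]}
_demo_mapping = {f"{language}:{name}": _demo_entry["id"] for name, language in _demo_entry["entities"]}
# _demo_mapping == {"en:Japan": 7, "ja:日本": 7}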
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
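

# Quick demonstration of the helpers above; these checks run on import and
# document the expected behaviour.
assert set_bit(0b1100, 1) == 0b1110
assert clear_bit(0b1110, 1) == 0b1100
assert flip_bit(0b1100, 2) == 0b1000
assert is_bit_set(0b1010, 1) is True
assert get_bit(0b1010, 2) == 0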
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
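

# Example run of the sort above; the check executes on import and documents
# the expected output.
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]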
if __name__ == "__main__":
import doctest
doctest.testmod()
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes=None,
    font_path=None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """Image processor that renders an optional text header onto the image and
    extracts a fixed number of flattened patches from it."""

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image, data_format=None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images,
        header_text=None,
        do_convert_rgb=None,
        do_normalize=None,
        max_patches=None,
        patch_size=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
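# Illustrative usage (editor's sketch; the blank image and its size are
# assumptions chosen for illustration, not part of the original file):
#
#     from PIL import Image
#     processor = Pix2StructImageProcessor()
#     image = Image.new("RGB", (640, 480), "white")
#     encoding = processor.preprocess(image, return_tensors="np")
#     # encoding["flattened_patches"]: (1, max_patches, 2 + H*W*C), where the
#     # first two features are the 1-based row/column ids of each patch, and
#     # encoding["attention_mask"] marks the non-padded patches.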
| 587
| 0
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape, as nested lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 713
|
"""Quine-McCluskey minimization of boolean functions."""

from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
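# Small non-interactive check of the helpers above (editor's sketch; the
# minterm values are assumptions for illustration). decimal_to_binary(3, [5])
# renders minterm 5 over three variables as "101"; merging "101" with "100"
# differs in exactly one bit, so compare_string yields "10_".
def _demo() -> None:
    assert decimal_to_binary(3, [5]) == ["101"]
    assert compare_string("101", "100") == "10_"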
| 672
| 0
|
"""Convert a T5 TensorFlow checkpoint to a PyTorch model."""

import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
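# Example invocation (editor's sketch; the paths below are placeholders, not
# real checkpoints):
#
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/t5/model.ckpt \
#         --config_file /path/to/t5/config.json \
#         --pytorch_dump_path /path/to/output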
| 138
|
"""Strassen matrix multiplication."""

from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
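# Sanity check (editor's sketch): Strassen's result should agree with a naive
# O(n^3) multiply, e.g. strassen(matrix1, matrix2) == _naive_multiply(matrix1,
# matrix2) for the demo matrices above. `_naive_multiply` is illustrative and
# not part of the original file.
def _naive_multiply(a: list, b: list) -> list:
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]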
| 138
| 1
|
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: take items by descending value/weight ratio."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
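# Worked example (editor's sketch; the item values and weights are assumptions
# for illustration). Sorted by value/weight ratio the items are (60, 10),
# (100, 20), (120, 30); with capacity 50 we take the first two whole and 2/3
# of the last: 60 + 100 + 80 = 240.
if __name__ == "__main__":
    assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0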
| 705
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCAmelCase_ = '''pt'''
elif is_tf_available():
UpperCAmelCase_ = '''tf'''
else:
UpperCAmelCase_ = '''jax'''
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained('google/byt5-small')

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def a__ ( self ):
_A= self.ta_base_tokenizer
_A= tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_A= tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def a__ ( self ):
_A= self.ta_base_tokenizer
_A= 'Unicode €.'
_A= tokenizer(lowerCAmelCase__ )
_A= [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , lowerCAmelCase__ )
# decoding
_A= tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , 'Unicode €.</s>' )
_A= tokenizer('e è é ê ë' )
_A= [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , lowerCAmelCase__ )
# decoding
_A= tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def a__ ( self ):
_A= self.ta_base_tokenizer
_A= ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_A= [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_A= tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
if FRAMEWORK != "jax":
_A= list(batch.input_ids.numpy()[0] )
else:
_A= list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def a__ ( self ):
_A= self.ta_base_tokenizer
_A= ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_A= tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowerCAmelCase__ )
self.assertIn('attention_mask' , lowerCAmelCase__ )
self.assertNotIn('decoder_input_ids' , lowerCAmelCase__ )
self.assertNotIn('decoder_attention_mask' , lowerCAmelCase__ )
def a__ ( self ):
_A= self.ta_base_tokenizer
_A= [
'Summary of the text.',
'Another summary.',
]
_A= tokenizer(
text_target=lowerCAmelCase__ , max_length=32 , padding='max_length' , truncation=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def a__ ( self ):
_A= self.ta_base_tokenizer
_A= ['A long paragraph for summarization. </s>']
_A= ['Summary of the text. </s>']
# fmt: off
_A= [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_A= [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_A= tokenizer(lowerCAmelCase__ , text_target=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , batch['input_ids'][0] )
self.assertEqual(lowerCAmelCase__ , batch['labels'][0] )
def a__ ( self ):
# safety check on max_len default value so we are sure the test works
_A= self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_A= self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_A= tempfile.mkdtemp()
_A= ' He is very happy, UNwant\u00E9d,running'
_A= tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
_A= tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
_A= after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
shutil.rmtree(lowerCAmelCase__ )
_A= self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_A= tempfile.mkdtemp()
_A= ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_A= tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_A= tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
_A= tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
_A= after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_A= tokenizer.__class__.from_pretrained(lowerCAmelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase__ )
def a__ ( self ):
_A= []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_A= json.load(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_A= json.load(lowerCAmelCase__ )
_A= [f"<extra_id_{i}>" for i in range(125 )]
_A= added_tokens_extra_ids + [
'an_additional_special_token'
]
_A= added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCAmelCase__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_A= tokenizer_class.from_pretrained(
lowerCAmelCase__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_A= added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowerCAmelCase__ )]
_A= tokenizer_class.from_pretrained(
lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def a__ ( self ):
_A= []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__ )
_A= tokenizer_class.from_pretrained(lowerCAmelCase__ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
_A= self.get_tokenizers(fast=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_A= ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_A= tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( self ):
_A= self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_A= [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_A= 0
_A= tokenizer.convert_ids_to_tokens(
lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
for attr in attributes_list:
setattr(lowerCAmelCase__ , attr + '_id' , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , attr + '_id' ) , lowerCAmelCase__ )
setattr(lowerCAmelCase__ , attr + '_id' , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , attr + '_id' ) , lowerCAmelCase__ )
setattr(lowerCAmelCase__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(lowerCAmelCase__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(lowerCAmelCase__ , 'additional_special_tokens_ids' ) , [] )
setattr(lowerCAmelCase__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 476
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
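# Illustrative usage (editor's sketch; the checkpoint name follows the
# pretrained map above, and downloading it is assumed to work in your
# environment):
#
#     tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#     ids = tokenizer("Hello world")["input_ids"]
#     # ids starts with the [CLS] id and ends with the [SEP] id, matching
#     # build_inputs_with_special_tokens above.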
| 498
|
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), the speed of light C, the value
# of Pi, and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve the Casimir relation F = (ℏ * c * π² * A) / (240 * d⁴) for whichever
    of force, area, or distance is passed as 0.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
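# Worked example (editor's sketch; the plate area and separation are assumed
# sample values): two 1 cm^2 plates 1 µm apart attract with roughly 1.3e-7 N.
if __name__ == "__main__":
    result = casimir_force(force=0, area=1e-4, distance=1e-6)
    print(result)  # {'force': ~1.3e-07}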
| 418
| 0
|
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """
    Counts the "triangle words" in words.txt: words whose letter values
    (A=1, ..., Z=26) sum to a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
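# Quick check of the letter-value computation (editor's sketch; "SKY" is the
# example from Project Euler problem 42): S+K+Y = 19+11+25 = 55 = t10.
if __name__ == "__main__":
    assert sum(ord(x) - 64 for x in "SKY") == 55
    assert 55 in TRIANGULAR_NUMBERS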
| 702
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
def snake_case__ ( self ) -> Optional[int]:
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Optional[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__ , time_step=0 )
def snake_case__ ( self ) -> int:
A__ = self.full_loop()
A__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def snake_case__ ( self ) -> Union[str, Any]:
A__ = self.full_loop(prediction_type="v_prediction" )
A__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def snake_case__ ( self ) -> List[str]:
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE__ , dynamic_thresholding_ratio=0 )
A__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
A__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
assert sample.dtype == torch.floataa
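    # Illustrative sketch of the denoising loop the helpers above exercise
    # (names such as `model` are assumptions for illustration, not part of
    # this test file):
    #
    #     scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
    #     scheduler.set_timesteps(10)
    #     for t in scheduler.timesteps:
    #         residual = model(sample, t)
    #         sample = scheduler.step(residual, t, sample).prev_sample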
| 562
| 0
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowercase ( unittest.TestCase ):
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = torch.nn.Linear(10 , 10 )
lowerCAmelCase__ : Union[str, Any] = torch.optim.SGD(model.parameters() , 0.1 )
lowerCAmelCase__ : Optional[Any] = Accelerator()
lowerCAmelCase__ : Dict = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
try:
pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 233
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
A__ : List[Any] = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
A__ : Optional[Any] = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
A__ : List[Any] = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean() )


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels )
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels ):
        question_id = f'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
        pred = id_pred['''prediction''']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average='''macro''' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        """
        Computes the metric for the selected SuperGLUE configuration.
        """
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg='''macro''' )
        elif self.config_name == "record":
            dataset = [
                {
                    '''qas''': [
                        {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
            return evaluate_record(dataset, predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 233
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientformer'] = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientformer'] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_efficientformer'] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
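# With the _LazyModule registration above, importing this package stays cheap:
# e.g. `from transformers.models.efficientformer import EfficientFormerConfig`
# only triggers the heavy submodule import on first attribute access.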
| 397
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model , image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )

        self.assertGreater(len(outputs ) , 0 )
        for detected_object in outputs:
            self.assertEqual(
                detected_object , {
                    '''score''': ANY(float ),
                    '''label''': ANY(str ),
                    '''box''': {'''xmin''': ANY(int ), '''ymin''': ANY(int ), '''xmax''': ANY(int ), '''ymax''': ANY(int )},
                } , )

        import datasets

        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )

        batch = [
            Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
            '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            # RGBA
            dataset[0]['''file'''],
            # LA
            dataset[1]['''file'''],
            # L
            dataset[2]['''file'''],
        ]
        batch_outputs = object_detector(batch , threshold=0.0 )

        self.assertEqual(len(batch ) , len(batch_outputs ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ) , 0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object , {
                        '''score''': ANY(float ),
                        '''label''': ANY(str ),
                        '''box''': {'''xmin''': ANY(int ), '''ymin''': ANY(int ), '''xmax''': ANY(int ), '''ymax''': ANY(int )},
                    } , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
    def test_small_model_tf(self):
pass
@require_torch
    def test_small_model_pt(self):
        model_id = '''hf-internal-testing/tiny-detr-mobilenetsv3'''

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
],
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = '''facebook/detr-resnet-50'''

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = '''facebook/detr-resnet-50'''

        object_detector = pipeline('''object-detection''' , model=model_id )

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9_985
        model_id = '''facebook/detr-resnet-50'''

        object_detector = pipeline('''object-detection''' , model=model_id )

        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=threshold )

        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = '''Narsil/layoutlmv3-finetuned-funsd'''
        threshold = 0.9_993

        object_detector = pipeline('''object-detection''' , model=model_id , threshold=threshold )

        outputs = object_detector(
            '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}},
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}},
] , )
| 397
| 1
|
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    """Find and print all articulation points of the undirected graph `l`,
    given as an adjacency list, using a low-link depth first search."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the DFS root is an articulation point iff it has > 1 outgoing edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
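# For the adjacency list above, compute_ap(data) prints the articulation
# points 2, 3 and 5: removing 2 separates the 0-1-2 triangle from the rest,
# removing 3 isolates vertex 4, and removing 5 cuts off the 5-6-7-8 cycle.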
| 388
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    """Builds tiny DeBERTa-v2 configs and inputs for the tests below."""

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )

        config = DebertaV2Config(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaV2Model(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )

        result = model(input_ids )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaV2ForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaV2ForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDebertaV2Model,
            'fill-mask': TFDebertaV2ForMaskedLM,
            'question-answering': TFDebertaV2ForQuestionAnswering,
            'text-classification': TFDebertaV2ForSequenceClassification,
            'token-classification': TFDebertaV2ForTokenClassification,
            'zero-shot': TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self ):
        self.model_tester = TFDebertaV2ModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaV2Config , hidden_size=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge" )
        self.assertIsNotNone(model )
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase ):
    @unittest.skip(reason="Model not available yet" )
    def test_inference_masked_lm(self ):
        pass

    @slow
    def test_inference_no_head(self ):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge" )
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 )
| 388
| 1
|
"""simple docstring"""
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the matching torch.nn module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'''Unsupported activation function: {act_fn}''' )
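# Usage sketch (illustrative): the returned module can be dropped into any
# model, e.g.
#
#     act = get_activation("silu")   # -> nn.SiLU()
#     hidden = act(torch.randn(2, 8))
#
# Unsupported names raise a ValueError.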
| 717
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
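# Instantiation sketch (illustrative; `CvtModel` comes from the accompanying
# modeling_cvt module, which is not part of this file):
#
#     configuration = CvtConfig()        # the defaults listed above
#     model = CvtModel(configuration)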
| 397
| 0
|
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """
    Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor.
    """

    feature_extractor_class = """SpeechT5FeatureExtractor"""
    tokenizer_class = """SpeechT5Tokenizer"""

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )

    def __call__( self , *args , **kwargs ):
        audio = kwargs.pop("""audio""" , None )
        text = kwargs.pop("""text""" , None )
        text_target = kwargs.pop("""text_target""" , None )
        audio_target = kwargs.pop("""audio_target""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )

        if audio is not None and text is not None:
            raise ValueError(
                """Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                """Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                """You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )

        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs )
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs )
            labels = targets["""input_values"""]
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs )
            labels = targets["""input_ids"""]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["""labels"""] = labels

            decoder_attention_mask = targets.get("""attention_mask""" )
            if decoder_attention_mask is not None:
                inputs["""decoder_attention_mask"""] = decoder_attention_mask

        return inputs

    def pad( self , *args , **kwargs ):
        input_values = kwargs.pop("""input_values""" , None )
        input_ids = kwargs.pop("""input_ids""" , None )
        labels = kwargs.pop("""labels""" , None )

        if input_values is not None and input_ids is not None:
            raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                """You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs )
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs )
                labels = targets["""input_ids"""]
            else:
                # temporarily pad with the mel-bin count instead of the feature size
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["""input_values"""]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["""labels"""] = labels

            decoder_attention_mask = targets.get("""attention_mask""" )
            if decoder_attention_mask is not None:
                inputs["""decoder_attention_mask"""] = decoder_attention_mask

        return inputs

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
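    # Usage sketch (illustrative; the checkpoint name is an assumption):
    #
    #     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    #     inputs = processor(text="Hello world", return_tensors="pt")
    #     text_back = processor.batch_decode(inputs["input_ids"])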
| 306
|
def exchange_sort(numbers):
    """Sort `numbers` in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
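    # Example: entering "3,1,2" at the prompt prints [1, 2, 3].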
| 483
| 0
|
from math import isqrt
def is_prime(number: int) -> bool:
    """Deterministic trial-division primality check."""
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )


def solution(max_prime: int = 1_0**6 ) -> int:
    """Count the primes below `max_prime` of the form 3*a*a + 3*a + 1,
    i.e. differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
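# Sanity check: the loop above enumerates exactly the candidates
# 3*a*a + 3*a + 1 < 10**6 (a = 1 .. 576), so
#     sum(is_prime(3 * a * a + 3 * a + 1) for a in range(1, 577))
# must equal solution().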
| 701
|
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long binary string."""
    result = ""
    try:
        with open(file_path, "rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible" )
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress a binary string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon )

    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # once the lexicon is full for the current code width, re-key it with
        # a leading zero so the next bit of code width can be used
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["0" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex

        lexicon[bin(index )[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str , to_write: str ) -> None:
    """Write the binary string to a file, padding the last chunk to a whole byte."""
    byte_length = 8
    try:
        with open(file_path, "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]

            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the length prefix (leading zeros up to and including the first 1)."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str , destination_path: str ) -> None:
    """Read a compressed file, LZW-decompress it and write the result."""
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
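# Typical invocation (file names are placeholders). Note that, despite its
# name, `compress` here performs LZW *decompression* of a file produced by
# the matching compressor:
#     python this_script.py compressed.bin restored.bin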
| 376
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    '''
    Configuration class to store the configuration of a Decision Transformer model.
    '''

    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self ,state_dim=17 ,act_dim=4 ,hidden_size=128 ,max_ep_len=4096 ,action_tanh=True ,vocab_size=1 ,n_positions=1024 ,n_layer=3 ,n_head=1 ,n_inner=None ,activation_function="relu" ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,scale_attn_weights=True ,use_cache=True ,bos_token_id=50256 ,eos_token_id=50256 ,scale_attn_by_inverse_layer_idx=False ,reorder_and_upcast_attn=False ,**kwargs ,):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
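# Instantiation sketch (illustrative): DecisionTransformerConfig(state_dim=17,
# act_dim=4) reproduces the defaults above, which are intended to match the
# hopper-medium checkpoint referenced in the archive map.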
| 36
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    '''
    Returns the prime factorisation of n as a list of factors.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    '''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_unet(self):
        torch.manual_seed(0 )
        model = UNet2DModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return model
    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
        return model
    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0 )
        vqvae = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
        unet = UNet2DModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None , unet=self.dummy_unet , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 )
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 , return_dict=False )
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        np.random.seed(0 )
        raw_audio = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(raw_audio=raw_audio , generator=generator , start_step=5 , steps=10 )
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_unet_condition , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        np.random.seed(0 )
        encoding = torch.rand((1, 1, 10) )
        output = pipe(generator=generator , encoding=encoding )
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator )
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 155
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 155
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the AutoencoderKL encoding method; `latent_dist` holds the
    encoded mean/log-variance as a DiagonalGaussianDistribution."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational autoencoder with KL loss, as used by latent diffusion models."""

    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__( self , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D",) , up_block_types=("UpDecoderBlock2D",) , block_out_channels=(64,) , layers_per_block=1 , act_fn="silu" , latent_channels=4 , norm_num_groups=32 , sample_size=32 , scaling_factor=0.1_82_15 , ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )

        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self , module , value=False ):
        if isinstance(module , (Encoder, Decoder) ):
            module.gradient_checkpointing = value

    def enable_tiling(self , use_tiling=True ):
        """Enable tiled VAE decoding to process large images tile by tile."""
        self.use_tiling = use_tiling

    def disable_tiling(self ):
        """Disable tiled VAE decoding."""
        self.enable_tiling(False )

    def enable_slicing(self ):
        """Enable sliced VAE decoding (one batch element at a time)."""
        self.use_slicing = True

    def disable_slicing(self ):
        """Disable sliced VAE decoding."""
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self ) -> Dict[str, AttentionProcessor]:
        """Return all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name , module , processors ):
            if hasattr(module , "set_processor" ):
                processors[F"""{name}.processor"""] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"""{name}.{sub_name}""" , child , processors )

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )

        return processors
    def set_attn_processor( self , processor ) -> None:
        """simple docstring"""
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(processor )} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
        def fn_recursive_attn_processor(name , module , processor ):
            if hasattr(module , "set_processor" ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""" , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor( self ) -> None:
        """simple docstring"""
        self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
    def encode( self , x , return_dict = True ) -> AutoencoderKLOutput:
        """simple docstring"""
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def _decode( self , z , return_dict = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
@apply_forward_hook
    def decode( self , z , return_dict = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )
    def blend_v( self , a , b , blend_extent ) -> torch.Tensor:
        """simple docstring"""
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h( self , a , b , blend_extent ) -> torch.Tensor:
        """simple docstring"""
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
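    # Illustrative sketch (hypothetical numbers): with blend_extent = 4, the four
    # overlapping rows/columns are mixed with weights (1 - k/4) from tile `a` and (k/4)
    # from tile `b` for k = 0..3, so each seam fades linearly from one tile to the next.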
    def tiled_encode( self , x , return_dict = True ) -> AutoencoderKLOutput:
        """simple docstring"""
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor )
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0 , x.shape[2] , overlap_size ):
            row = []
            for j in range(0 , x.shape[3] , overlap_size ):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile )
                tile = self.quant_conv(tile )
                row.append(tile )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        moments = torch.cat(result_rows , dim=2 )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def tiled_decode( self , z , return_dict = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor )
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0 , z.shape[2] , overlap_size ):
            row = []
            for j in range(0 , z.shape[3] , overlap_size ):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile )
                decoded = self.decoder(tile )
                row.append(decoded )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        dec = torch.cat(result_rows , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample , sample_posterior = False , return_dict = True , generator = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        x = sample
        posterior = self.encode(x ).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator )
        else:
            z = posterior.mode()
        dec = self.decode(z ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
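# Minimal usage sketch (illustrative; the checkpoint name and shapes are assumptions,
# not part of this file):
#   vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
#   vae.enable_tiling()  # encode/decode large images tile by tile
#   posterior = vae.encode(torch.randn(1, 3, 512, 512)).latent_dist
#   reconstruction = vae.decode(posterior.sample()).sample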
| 397
| 0
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image )-> torch.Tensor:
    """simple docstring"""
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0, 3, 1, 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
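# Usage sketch (illustrative; "img.png" is a hypothetical file):
#   image = PIL.Image.open("img.png")
#   tensor = preprocess(image)  # float32 in [-1, 1], shape (1, 3, H, W), H/W multiples of 32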
class LDMSuperResolutionPipeline( DiffusionPipeline ):
    def __init__( self , vqvae : VQModel , unet : UNet2DModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image = None , batch_size = 1 , num_inference_steps = 100 , eta = 0.0 , generator = None , output_type = "pil" , return_dict = True , ) -> Union[Tuple, ImagePipelineOutput]:
        '''simple docstring'''
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
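# Usage sketch (illustrative; the checkpoint name is an assumption):
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]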
| 706
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_( state_dict )-> None:
    """simple docstring"""
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(k, None )
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
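# Example of the rename this mapping produces (illustrative):
#   "decoder.blocks.0.mlp.0.weight" -> "decoder.layers.0.fc1.weight"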
def rename_keys( s_dict )-> dict:
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v )
        print(f'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb( emb )-> nn.Linear:
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
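# make_linear_from_emb implements weight tying: the returned linear layer re-uses the
# token-embedding matrix as its projection weight (no bias), so the LM head and the
# embeddings share parameters.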
def _download( url, root="." )-> bytes:
    """simple docstring"""
    # NOTE: the default download directory is an assumption; the call site below passes
    # only the url.
    os.makedirs(root, exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('''/''' )[-2]
    download_target = os.path.join(root, filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f'{download_target} exists and is not a regular file' )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target, '''rb''' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )
    with urllib.request.urlopen(url ) as source, open(download_target, '''wb''' ) as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''' ) ), ncols=80, unit='''iB''', unit_scale=True, unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target, '''rb''' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
    return model_bytes
def convert_openai_whisper_to_tfms( checkpoint_path, pytorch_dump_folder_path )-> None:
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='''cpu''' )
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions['''n_mels'''], d_model=dimensions['''n_audio_state'''], max_target_positions=dimensions['''n_text_ctx'''], encoder_layers=dimensions['''n_audio_layer'''], encoder_attention_heads=dimensions['''n_audio_head'''], decoder_layers=dimensions['''n_text_layer'''], decoder_attention_heads=dimensions['''n_text_head'''], max_source_positions=dimensions['''n_audio_ctx'''], )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict, strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f' but all the following weights are missing {missing}' )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 479
| 0
|
import math
class SelfOrganizingMap :
    '''simple docstring'''
    def get_winner( self , weights , sample ) -> int:
        # Compute the winning vector by Euclidean distance.
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample ) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if d0 > d1 else 1
    def update( self , weights , sample , j , alpha ) -> list[list[int | float]]:
        # Move the winning vector j towards the sample by the learning rate alpha.
        for i in range(len(weights[j] ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
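# Worked example (illustrative): for the first training sample [1, 1, 0, 0] and the
# initial weights used in main() below, d0 = 0.8^2 + 0.4^2 + 0.5^2 + 0.9^2 = 1.86 and
# d1 = 0.2^2 + 0.6^2 + 0.7^2 + 0.3^2 = 0.98, so get_winner returns 0 and update() then
# moves weights[0] towards the sample by the learning rate alpha.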
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(f'''Cluster that the test sample belongs to : {winner}''' )
    print(f'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 278
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ) -> None:
        '''simple docstring'''
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ) -> dict:
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        '''simple docstring'''
        self.image_processor_tester = MobileNetVaImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> dict:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> None:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'crop_size' ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> None:
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 20} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
    def test_batch_feature( self ) -> None:
        '''simple docstring'''
        pass
    def test_call_pil( self ) -> None:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_numpy( self ) -> None:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_pytorch( self ) -> None:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 425
| 0
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open( *args , **kwargs ) -> None:
            '''simple docstring'''
            pass
def hashimage( image: Image ) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( mask: Image ) -> Dict:
    '''simple docstring'''
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ) -> tuple:
        '''simple docstring'''
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , mask_generator , examples ) -> None:
        '''simple docstring'''
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
@slow
@require_torch
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
lowercase_ = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = "facebook/sam-vit-huge"
lowercase_ = pipeline("mask-generation" , model=UpperCAmelCase )
lowercase_ = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
] , )
| 601
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def convert_config( model , is_finetuned ):
    '''simple docstring'''
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
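# Note: fs_config.conv_feature_layers is a string such as "[(64, 10, 5), (128, 3, 2)]"
# (illustrative values); eval() turns it into a list of (dim, kernel, stride) tuples,
# which is why conv_dim/conv_kernel/conv_stride are read positionally above.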
@torch.no_grad()
def convert_sew_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    '''simple docstring'''
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 601
| 1
|
import functools
def mincost_tickets( days : list[int] , costs : list[int] ) -> int:
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index : int ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
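# Worked example (illustrative): with days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15],
# the cheapest plan buys a 7-day pass on day 1 (cost 7) covering days 1-7, then 1-day
# passes on days 8 and 20 (cost 2 each), for a total of 11.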
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
    def setUp( self ) -> None:
        """simple docstring"""
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""
    def _setup_pt_ckpt( self , model_path ) -> None:
        """simple docstring"""
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt( self , model_path ) -> None:
        """simple docstring"""
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
    def test_framework_provided( self ) -> None:
        """simple docstring"""
        mock_framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_provided( self ) -> None:
        """simple docstring"""
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self ) -> None:
        """simple docstring"""
        # TensorFlow not in environment -> use PyTorch
        mock_tf = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_torch_available""" , mock_torch ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf = MagicMock(return_value=True )
        mock_torch = MagicMock(return_value=True )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf = MagicMock(return_value=False )
        mock_torch = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 279
| 0
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
snake_case = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
snake_case = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """ Hello world! cécé herlolip"""
mnli_rename_keys = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def remove_ignore_keys_( state_dict ):
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def load_xsum_checkpoint( checkpoint_path ):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location="cpu" )
    hub_interface = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval()
    hub_interface.model.load_state_dict(sd["model"] )
    return hub_interface
def make_linear_from_emb( emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint( checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ):
    """simple docstring"""
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load("pytorch/fairseq" , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace("." , "-" )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT , return_tensors="pt" ).unsqueeze(0 )
    if not torch.eq(tokens , tokens2 ).all():
        raise ValueError(
            f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}''' )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict("mnli" , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , "lm_head" ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 721
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
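# _LazyModule defers importing the torch-heavy modeling files listed in
# _import_structure until an attribute of this package is first accessed, which keeps
# `import transformers` cheap when CpmAnt is never used.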
| 488
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
    model_type = """bridgetower_vision_model"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1E-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("model_type" ) == "bridgetower":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig( PretrainedConfig ):
    model_type = """bridgetower_text_model"""
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
@classmethod
def __a ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> "PretrainedConfig":
a, a : str = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
if config_dict.get("model_type" ) == "bridgetower":
a : str = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768,
        initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type="add",
        num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
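# Minimal usage sketch (illustrative only; the parameter values are arbitrary): building
# a composite config from explicit sub-configs and round-tripping it through to_dict().
if __name__ == "__main__":
    text_cfg = BridgeTowerTextConfig(hidden_size=768, num_hidden_layers=12)
    vision_cfg = BridgeTowerVisionConfig(hidden_size=768, patch_size=16)
    cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
    # to_dict() serializes the nested sub-configs so the composite config can be saved and reloaded
    assert cfg.to_dict()["vision_config"]["patch_size"] == 16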
| 633
|
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
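# Worked micro-example (illustrative, not part of the metric): the core of the
# computation above is exp(mean NLL) per sequence. For a "model" that assigns
# probability 0.25 to every observed next token, the perplexity is exactly 4.
if __name__ == "__main__":
    probs = torch.full((1, 5), 0.25)  # per-token probability of the observed tokens
    nll = -torch.log(probs)  # negative log-likelihood per token
    ppl = torch.exp(nll.mean(1))  # exponentiated average NLL == perplexity
    assert torch.allclose(ppl, torch.tensor([4.0]))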
| 633
| 1
|
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of monthly payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
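# Quick sanity check (illustrative numbers): borrowing 25,000 at 12% per annum over
# 3 years (36 monthly payments at a 1% monthly rate) gives roughly 830.36 per month:
#     equated_monthly_installments(25000, 0.12, 3)  # ~= 830.36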
| 473
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
        encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
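# Usage sketch (illustrative; the checkpoint name is an assumption): the ONNX config
# wraps a model config and produces framework-tagged dummy inputs for export tracing.
#
#     from transformers import AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="default")
#     dummy = onnx_config.generate_dummy_inputs(tok, framework=TensorType.PYTORCH)
#     # dummy now contains input_ids / attention_mask / decoder_* tensors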
| 473
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
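# What these tests establish (illustrative note): transpose/reshape/squeeze/expand_dims
# in transformers.utils dispatch on the input type, so the same call works unchanged for
# numpy, torch, tf and jax inputs, e.g.:
#
#     transpose(np.zeros((3, 4))).shape      # (4, 3)
#     transpose(torch.zeros(3, 4)).shape     # torch.Size([4, 3])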
| 588
|
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset, the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
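# Format sketch (illustrative): the NER task above expects one token per line with the
# label in the configured column and blank lines separating sentences, e.g. a train.txt of
#
#     EU B-ORG
#     rejects O
#     German B-MISC
#
# read with NER().read_examples_from_file(data_dir, Split.train) would yield one
# InputExample per sentence. The file contents here are made up for illustration.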
| 588
| 1
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
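# Example invocation (script name and paths are placeholders for illustration):
#     python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path model.ckpt --config_file config.json \
#         --pytorch_dump_path pytorch_model.bin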
| 57
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, clusters=None, do_resize=True, size=None, resample=PILImageResampling.BILINEAR,
        do_normalize=True, do_color_quantize=True, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image, data_format=None) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self, images, do_resize=None, size=None, resample=None, do_normalize=None, do_color_quantize=None,
        clusters=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
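# Minimal sketch (illustrative, random data): color_quantize maps every RGB pixel to the
# index of its nearest cluster centroid, which is how ImageGPT-style processors turn an
# image into a token sequence.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    image = rng.random((8, 8, 3))  # toy 8x8 RGB image in [0, 1]
    clusters = rng.random((16, 3))  # 16 toy color centroids
    tokens = color_quantize(image, clusters)
    assert tokens.shape == (64,) and tokens.max() < 16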
| 57
| 1
|
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
| 33
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128,
        sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
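# Usage sketch (illustrative, synthetic audio; exact output shapes depend on the STFT
# settings above): one second of mono audio at the default 44.1 kHz rate goes in, and a
# padded (batch, 1, time, 128) log-mel tensor plus a patch-level mask come out.
if __name__ == "__main__":
    extractor = TvltFeatureExtractor()
    waveform = np.zeros(44100, dtype=np.float32)  # one second of silence
    batch = extractor(waveform, sampling_rate=44100, return_tensors="np")
    print(batch["audio_values"].shape, batch["audio_mask"].shape)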
| 266
| 0
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer built from tokenizers components."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given files"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
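# Training sketch (illustrative toy corpus): the tokenizer can be fit directly from an
# in-memory iterator; the special tokens keep ids 0/1/2 and the unk id is patched in by
# add_unk_id() after training.
if __name__ == "__main__":
    tok = SentencePieceUnigramTokenizer()
    tok.train_from_iterator(["hello world", "hello tokenizers"], vocab_size=100, show_progress=False)
    print(tok.encode("hello world").tokens)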
| 281
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 281
| 1
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler,
            safety_checker=safety_checker, feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )
@torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )
@torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )
@torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )
@torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
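# --- Illustrative usage (editor's addition, not part of the original pipeline) ---
# A minimal sketch, assuming the constructor above: the comparison pipeline borrows
# its components from an ordinary v1-4 pipeline. Running this downloads four large
# checkpoints, so treat it as a sketch rather than a definitive recipe.
def _demo_comparison_pipeline():
    base = StableDiffusionPipeline.from_pretrained(pipe4_model_id)
    pipe = StableDiffusionComparisonPipeline(
        vae=base.vae,
        text_encoder=base.text_encoder,
        tokenizer=base.tokenizer,
        unet=base.unet,
        scheduler=base.scheduler,
        safety_checker=base.safety_checker,
        feature_extractor=base.feature_extractor,
    )
    # `images` holds one image per checkpoint (v1.1 through v1.4).
    return pipe("a photo of an astronaut riding a horse", num_inference_steps=25).images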
| 69
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
__lowerCamelCase : Any = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
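# --- Illustrative example (editor's addition) ---
# How the "*" wildcard in MAPPING is resolved above: the fairseq layer index is
# sliced out of the weight name and substituted into the HF key. The weight name
# below is a hypothetical example.
def _demo_wildcard_mapping():
    name = "encoder.layers.3.self_attn.k_proj.weight"
    key = "self_attn.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    return MAPPING[key].replace("*", layer_index)  # -> "encoder.layers.3.attention.k_proj"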
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCamelCase : str = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
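# --- Usage note (editor's addition) ---
# Typical command line, with hypothetical paths (the script filename is an
# assumption; the flags match the argparse setup above):
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path /path/to/output_dir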
| 297
| 0
|
'''simple docstring'''
import os
def solution(filename: str = "matrix.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
if __name__ == "__main__":
print(F'{solution() = }')
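# --- Worked example (editor's addition) ---
# The same right/down recurrence on a tiny hand-checked grid: the minimal path
# through [[1, 3], [2, 4]] is 1 -> 2 -> 4 with total 7.
def _min_path_sum(grid: list) -> int:
    n = len(grid)
    dp = [[0] * n for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


assert _min_path_sum([[1, 3], [2, 4]]) == 7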
| 428
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
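# --- Illustrative check (editor's addition) ---
# check_same_shape simply verifies that every tensor in the list shares one shape.
assert check_same_shape([torch.zeros(1, 3), torch.ones(1, 3)])
assert not check_same_shape([torch.zeros(1, 3), torch.zeros(2, 3)])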
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
SCREAMING_SNAKE_CASE = True
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=True, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        low_res_img = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt, image=low_res_img, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 428
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 65
|
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
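# --- Illustrative example (editor's addition) ---
# A minimal sketch of the decorator factory above: with eager mode off the wrapped
# function is compiled via tf.function; with eager mode on it runs as plain Python.
# The demo function is hypothetical and guarded so the module still imports
# without TensorFlow installed.
if is_tf_available():

    @run_with_tf_optimizations(do_eager_mode=False, use_xla=False)
    def _demo_square(x):
        return x * x

    # _demo_square(tf.constant(3.0))  # traces a graph on first call -> tf.Tensor(9.0)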
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
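# Example (editor's addition): random_input_ids(2, 4, 100) returns a tf.Tensor of
# shape (2, 4) and dtype tf.int32 with token ids drawn uniformly from [0, 100).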
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)
    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)
    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run 5 additional times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 347
| 0
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
print(f'{key}\n{value}\n')
| 713
|
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    rotation_mat = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_mat, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list; the point-set pairings below are one
    # plausible reconstruction of the garbled originals
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 305
| 0
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ :List[Any] = logging.get_logger(__name__)
lowercase__ :str = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
lowercase__ :str = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
lowercase__ :str = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
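# --- Usage note (editor's addition) ---
# Example invocation with hypothetical paths; the two positional arguments match
# the argparse setup above, and the checkpoint filename must be one of
# ACCEPTABLE_CHECKPOINTS (the script filename itself is an assumption):
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       /path/to/nlvr2_fine_tuned.th /path/to/output_dir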
| 522
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
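# --- Illustrative usage (editor's addition) ---
# A minimal sketch of the dispatch logic above, assuming the pretrained checkpoint
# "microsoft/speecht5_asr" and 16 kHz mono float audio. Passing `audio` plus
# `text_target` yields model inputs with tokenized labels attached.
def _demo_speecht5_processor(raw_speech, transcript):
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
    batch = processor(audio=raw_speech, sampling_rate=16000, text_target=transcript)
    return batch["input_values"], batch["labels"]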
| 522
| 1
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
a__ : Optional[Any] = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
a__ : Union[str, Any] = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
a__ : List[Any] = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 235
|
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]
def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])
def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 235
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
    'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_transfo_xl'] = [
        'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AdaptiveEmbedding',
        'TransfoXLForSequenceClassification',
        'TransfoXLLMHeadModel',
        'TransfoXLModel',
        'TransfoXLPreTrainedModel',
        'load_tf_weights_in_transfo_xl',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_transfo_xl'] = [
        'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFAdaptiveEmbedding',
        'TFTransfoXLForSequenceClassification',
        'TFTransfoXLLMHeadModel',
        'TFTransfoXLMainLayer',
        'TFTransfoXLModel',
        'TFTransfoXLPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
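
# Note (hedged, added for illustration): _LazyModule replaces this module in
# sys.modules, so a name listed in _import_structure, for example
# `from transformers.models.altclip import AltCLIPModel`, is resolved on first
# attribute access, and the torch-backed submodule is imported only at that point.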
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')

    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)

    print(f'\n{mode.title()}ed message:')
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index])
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Advance through the key, wrapping around at its end.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters pass through unchanged.
            translated.append(symbol)

    return ''.join(translated)


if __name__ == "__main__":
    main()
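
# Hedged roundtrip sketch (illustrative; not part of the original script):
# encrypting and then decrypting with the same key should recover the
# original message, since case is preserved and non-letters pass through.
if __name__ == "__main__":
    _msg = "Attack at dawn"
    _key = "LEMON"
    assert decrypt_message(_key, encrypt_message(_key, _msg)) == _msg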
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = 'https://www.worldometers.info/coronavirus') -> dict:
    """Scrape the worldometers page and return COVID-19 headline statistics."""
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == '__main__':
    print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
        print(f'{key}\n{value}\n')
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision", [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
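
# Hedged example (illustrative; mirrors the assertion above): for
# repo_id="org-name/dataset-name", path="filename with blanks.csv" and
# revision="v2", the URL is expected to be
# "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv".
if __name__ == "__main__":
    print(hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2"))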
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict