| code (string, lengths 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, lengths 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1) |
|---|---|---|---|---|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
) -> None:
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
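A quick usage sketch; the tokenizer name and data directory below are placeholders, and `Seq2SeqDataset`/`pickle_save` are assumed to come from the adjacent `utils` module:

```python
# Hypothetical invocation; "facebook/bart-base" and "./cnn_dm" are placeholders.
# The fire.Fire hook above exposes the same call on the command line, e.g.
#   python save_len_file.py --tokenizer_name facebook/bart-base --data_dir ./cnn_dm
save_len_file("facebook/bart-base", "./cnn_dm", consider_target=True)
```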
| 99 |
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 194 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Streamer that prints tokens to stdout as soon as entire words are formed."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and forwards text as soon as it forms entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decode the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    """Streamer that stores print-ready text in a queue, to be consumed as an iterator."""

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Puts the new text in the queue. If the stream is ending, also puts a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
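A minimal sketch of how these streamers are typically driven (assumes a small causal-LM checkpoint such as `gpt2` is available as a placeholder):

```python
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")  # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True)

# generate() blocks, so it runs in a background thread while the main
# thread consumes decoded text chunks from the streamer's queue.
thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
thread.start()
generated_text = "".join(chunk for chunk in streamer)
thread.join()
print(generated_text)
```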
| 84 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""Wrapper class that runs multiple `ControlNetModel` instances for Multi-ControlNet."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
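A hypothetical round trip through the class above; the checkpoint names stand in for any two ControlNet checkpoints:

```python
from diffusers import ControlNetModel

# Load two ControlNet checkpoints (names are placeholders).
canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
openpose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")

multi = MultiControlNetModel([canny, openpose])
multi.save_pretrained("./multi-controlnet")  # writes ./multi-controlnet and ./multi-controlnet_1
restored = MultiControlNetModel.from_pretrained("./multi-controlnet")
```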
| 84 | 1 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)

        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
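For reference, a sketch of the completion-truncation behaviour the `@slow` test above exercises (assumes the `Salesforce/codegen-350M-mono` checkpoint can be downloaded):

```python
from transformers import CodeGenTokenizer

tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tok.encode("def add(a, b):\n    return a + b\n\n\n\n# unrelated trailing text")
# Everything from the first matched pattern onwards is dropped at decode time.
print(tok.decode(ids, truncate_before_pattern=["\n\n\n", "^#"]))
```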
| 25 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
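A sketch of what these dummies do at runtime: importing the package never fails, and the missing optional backend only surfaces when one of the guarded objects is actually used.

```python
# Hypothetical behaviour when flax/transformers are not installed:
try:
    FlaxStableDiffusionPipeline()  # any dummy defined above
except ImportError as err:
    # requires_backends raises ImportError with pip-install instructions.
    print(err)
```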
| 34 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()

    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")

    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
| 358 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
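A worked check of both helpers:

```python
# Two resistors of 2 and 4 ohm.
print(resistor_parallel([2.0, 4.0]))  # 1 / (1/2 + 1/4) = 1.333...
print(resistor_series([2.0, 4.0]))    # 2 + 4 = 6.0
```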
| 194 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
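A minimal sketch of building a randomly initialised model from this config (`GLPNModel` ships alongside this config class in transformers):

```python
from transformers import GLPNConfig, GLPNModel

config = GLPNConfig(depths=[2, 2, 2, 2], hidden_sizes=[32, 64, 160, 256])
model = GLPNModel(config)  # randomly initialised, not pretrained
```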
| 83 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Posts a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")
if __name__ == "__main__":
main()
| 83 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 369 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def __UpperCamelCase ( _A , _A , _A , _A=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
lowerCAmelCase_ = os.path.abspath(_A )
logger.info(f"Loading PyTorch weights from {pt_path}" )
lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )
logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
lowerCAmelCase_ = convert_pytorch_state_dict_to_flax(_A , _A )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowerCAmelCase_ = convert_pytorch_sharded_state_dict_to_flax(_A , _A )
return flax_state_dict
def __UpperCamelCase ( _A , _A , _A , _A , ):
def is_key_or_prefix_key_in_dict(_A ) -> bool:
return len(set(_A ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_A ):
lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_A ):
lowerCAmelCase_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase_ = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowerCAmelCase_ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowerCAmelCase_ = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowerCAmelCase_ = pt_tuple_key[-2] + '''_v'''
if name is not None:
lowerCAmelCase_ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCamelCase ( _A , _A ):
# convert pytorch tensor to numpy
lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCAmelCase_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCAmelCase_ = flax_model.params['''params''']
else:
lowerCAmelCase_ = flax_model.params
lowerCAmelCase_ = flatten_dict(_A )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCAmelCase_ = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(_A )
lowerCAmelCase_ = {}
lowerCAmelCase_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCAmelCase_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase_ = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCAmelCase_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase_ = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(
_A , _A , _A , _A )
# add model prefix if necessary
lowerCAmelCase_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCAmelCase_ = jnp.asarray(_A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_A , _A )
continue
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(_A )
else:
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(_A )
return unflatten_dict(_A )
def __UpperCamelCase ( _A , _A ):
import torch
# Load the index
lowerCAmelCase_ = {}
for shard_file in shard_filenames:
# load using msgpack utils
lowerCAmelCase_ = torch.load(_A )
lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCAmelCase_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCAmelCase_ = flax_model.params['''params''']
lowerCAmelCase_ = flatten_dict(_A )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
lowerCAmelCase_ = flax_model.params
lowerCAmelCase_ = flatten_dict(_A )
lowerCAmelCase_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCAmelCase_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase_ = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCAmelCase_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase_ = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(
_A , _A , _A , _A )
# add model prefix if necessary
lowerCAmelCase_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCAmelCase_ = jnp.asarray(_A )
continue
if "var" in flax_key[-1]:
lowerCAmelCase_ = jnp.asarray(_A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_A , _A )
continue
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(_A )
else:
# also add unexpected weight so that warning is thrown
lowerCAmelCase_ = jnp.asarray(_A )
return unflatten_dict(_A )
def __UpperCamelCase ( _A , _A ):
lowerCAmelCase_ = os.path.abspath(_A )
logger.info(f"Loading Flax weights from {flax_checkpoint_path}" )
# import correct flax class
lowerCAmelCase_ = getattr(_A , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(_A , '''rb''' ) as state_f:
try:
lowerCAmelCase_ = from_bytes(_A , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(_A , _A )
def __UpperCamelCase ( _A , _A ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
lowerCAmelCase_ = flatten_dict(jax.tree_util.tree_map(lambda _A : x.dtype == jnp.bfloataa , _A ) ).values()
if any(_A ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
lowerCAmelCase_ = jax.tree_util.tree_map(
lambda _A : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _A )
lowerCAmelCase_ = flatten_dict(_A )
lowerCAmelCase_ = pt_model.state_dict()
lowerCAmelCase_ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
lowerCAmelCase_ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCAmelCase_ = []
lowerCAmelCase_ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCAmelCase_ = flax_key_tuple[0] == pt_model.base_model_prefix
lowerCAmelCase_ = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase_ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase_ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_A ) not in pt_model_dict:
# conv layer
lowerCAmelCase_ = flax_key_tuple[:-1] + ('''weight''',)
lowerCAmelCase_ = jnp.transpose(_A , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_A ) not in pt_model_dict:
# linear layer
lowerCAmelCase_ = flax_key_tuple[:-1] + ('''weight''',)
lowerCAmelCase_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCAmelCase_ = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCAmelCase_ = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
lowerCAmelCase_ = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
lowerCAmelCase_ = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCAmelCase_ = '''.'''.join(_A )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCAmelCase_ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCAmelCase_ = key.split('''.''' )
lowerCAmelCase_ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCAmelCase_ = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCAmelCase_ = key_components[-2] + '''_v'''
if name is not None:
lowerCAmelCase_ = key_components[:-3] + [name]
lowerCAmelCase_ = '''.'''.join(_A )
lowerCAmelCase_ = key
if flax_key in special_pt_names:
lowerCAmelCase_ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
lowerCAmelCase_ = np.asarray(_A ) if not isinstance(_A , np.ndarray ) else flax_tensor
lowerCAmelCase_ = torch.from_numpy(_A )
# remove from missing keys
missing_keys.remove(_A )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_A )
pt_model.load_state_dict(_A )
# re-transform missing_keys to list
lowerCAmelCase_ = list(_A )
if len(_A ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(_A ) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
''' use it for predictions and inference.''' )
else:
logger.warning(
f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
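From the user's side, these converters are reached through `from_pt=True`; a minimal sketch (the checkpoint name is a placeholder):

```python
from transformers import BertModel, FlaxBertModel

# Save a PyTorch checkpoint, then load it into the equivalent Flax model;
# from_pt=True routes through PyTorch-to-Flax converters like the ones above.
BertModel.from_pretrained("bert-base-cased").save_pretrained("./bert-pt")
flax_model = FlaxBertModel.from_pretrained("./bert-pt", from_pt=True)
```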
| 167 | 0 |
"""simple docstring"""
lowerCamelCase_ : List[Any] = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
    from .pipelines import MidiProcessor
| 81 |
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 81 | 1 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    '''simple docstring'''

    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        }, )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
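A small usage sketch of the `to_dict` override above; the output directory and generation settings are illustrative:

# Nested GenerationConfig values are flattened to plain dicts, which keeps the
# serialized arguments JSON-friendly.
args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True)
args.generation_config = GenerationConfig(max_length=128, num_beams=4)
serialized = args.to_dict()
assert isinstance(serialized["generation_config"], dict)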
| 35 | from __future__ import annotations
from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    '''simple docstring'''

    def __init__(self):
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str):
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int):
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
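A quick demonstration of the walk; the outgoing probabilities of each node sum to 1, and the visit counts approximate the chain's long-run behaviour:

transitions = [
    ("a", "a", 0.9), ("a", "b", 0.1),
    ("b", "a", 0.5), ("b", "b", 0.5),
]
print(get_transitions("a", transitions, 5000).most_common())  # "a" dominates, roughly 5:1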
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    """simple docstring"""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """simple docstring"""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    """simple docstring"""
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
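The wildcard resolution inside `recursively_load_weights` is the subtle part; seen in isolation it works like this (the names below are illustrative):

# After a ".*." key is split, the layer index is recovered from the fairseq name
# and substituted into the HF key.
name = "encoder.layers.3.fc1.weight"
mapped_key = "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense"
layer_index = name.split("fc1")[0].split(".")[-2]  # -> "3"
print(mapped_key.replace("*", layer_index))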
| 218 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''')
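A sanity check on the 2x2 branch (a diagonal matrix inverts elementwise):

print(inverse_of_matrix([[2.0, 0.0], [0.0, 4.0]]))  # [[0.5, 0.0], [0.0, 0.25]]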
| 67 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained('''albert-base-v2''')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 354 |
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    '''simple docstring'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
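The implementation agrees with the standard library and the classic test vector:

import zlib

assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 0x11E60398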
| 97 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = '''switch_transformers'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}

    def __init__(self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1E-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("""-""")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
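A small check of the sparse-step bookkeeping above (values are illustrative): with 12 encoder layers and 3 sparse encoder layers, every 4th layer is a sparse MoE layer.

config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
print(config.encoder_sparse_step)  # 4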
| 239 |
"""simple docstring"""
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
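Example run: 9 is reachable (4 + 5), while no subset of the array sums to 30.

print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True
print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False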
| 316 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show() | 370 |
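The "max-min composition" comment above has no code attached in the original; a minimal numpy sketch for two fuzzy relations R (on X×Y) and S (on Y×Z):

R = np.array([[0.2, 0.8], [1.0, 0.4]])
S = np.array([[0.5, 0.9], [0.3, 0.7]])
# Element (i, k) = max over j of min(R[i, j], S[j, k])
max_min = np.max(np.minimum(R[:, :, None], S[None, :, :]), axis=1)
print(max_min)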
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path) | 96 | 0 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 245 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
    )
UpperCAmelCase__ : Tuple =False
UpperCAmelCase__ : int =False
UpperCAmelCase__ : Tuple =False
UpperCAmelCase__ : Optional[Any] =False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
def _lowercase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : List[Any] ) ->int:
"""simple docstring"""
pass
def _lowercase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
def _lowercase ( self : List[Any] ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = True
# in YOLOS, the seq_len is different
SCREAMING_SNAKE_CASE : Any = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
SCREAMING_SNAKE_CASE : List[str] = len(UpperCAmelCase__ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[int] = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE : str = outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowercase ( self : Any ) ->str:
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE : List[Any] = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
# YOLOS has a different seq_length
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Any = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCAmelCase__ )
@slow
def _lowercase ( self : str ) ->List[Any]:
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : str = YolosModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def __lowercase ( ) -> List[Any]:
SCREAMING_SNAKE_CASE : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : int ) ->Union[str, Any]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def _lowercase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : str = image_processor(images=UpperCAmelCase__ , return_tensors="""pt""" ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(inputs.pixel_values )
# verify outputs
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] , device=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] , device=UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) )
# verify postprocessing
SCREAMING_SNAKE_CASE : int = image_processor.post_process_object_detection(
UpperCAmelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
SCREAMING_SNAKE_CASE : str = torch.tensor([0.99_94, 0.97_90, 0.99_64, 0.99_72, 0.98_61] ).to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : str = [7_5, 7_5, 1_7, 6_3, 1_7]
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95] ).to(UpperCAmelCase__ )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , UpperCAmelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , UpperCAmelCase__ )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , UpperCAmelCase__ ) )
| 245 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target values.
    return (data["data"], data["target"])


def xgboost(features, target) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris['target_names']
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap='Blues', normalize='true', )
    plt.title('Normalized Confusion Matrix - IRIS Dataset')
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
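An illustrative follow-up (not in the original): the fitted classifier can then label new measurements.

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

data = load_iris()
x_tr, x_te, y_tr, y_te = train_test_split(data["data"], data["target"], test_size=0.25)
clf = xgboost(x_tr, y_tr)
print(clf.predict(x_te[:3]))  # three predicted class indices, e.g. [0 2 1]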
| 353 | from ..utils import DummyObject, requires_backends
class a__(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
class a__(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
class a__(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
class a__(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
class a__(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
class a__ ( metaclass=__snake_case ):
A__ : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 197 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 121 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to("cuda")\n\n        >>> prompt = "A red cartoon frog, 4k"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to("cuda")\n\n        >>> init_image = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/frog.png"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save("red_frog.png")\n        ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    # Round each dimension up to a multiple of scale_factor**2, then divide by
    # scale_factor: the result is the latent resolution for the requested size.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    # Resize, scale RGB values to [-1, 1] and convert to a (1, 3, h, w) tensor.
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
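
# Quick sketch of the two helpers above: `prepare_image` yields a (1, 3, h, w)
# tensor in [-1, 1]; `downscale_height_and_width` returns
# ceil(dim / scale_factor**2) * scale_factor for each dimension.
#
#   >>> prepare_image(Image.new("RGB", (64, 64)), w=512, h=512).shape
#   torch.Size([1, 3, 512, 512])
#   >>> downscale_height_and_width(768, 768, scale_factor=8)
#   (96, 96)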
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Pipeline for image-to-image generation using Kandinsky 2.2 (see the example docstring above)."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Examples:
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 121 | 1 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
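
# Usage sketch for the helpers above: a file always verifies against the
# size/checksum recorded from itself (`path` is any readable file).
def _checksum_demo(path: str) -> None:
    expected = {path: get_size_checksum_dict(path)}
    recorded = {path: get_size_checksum_dict(path)}
    verify_checksums(expected, recorded, verification_name="demo")  # passes silently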
| 176 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
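
# Lazy-loading sketch: with the registration above, heavyweight submodules are
# only imported on first attribute access (illustrative usage):
#
#   from transformers.models import roformer
#   model_cls = roformer.RoFormerModel  # modeling_roformer (and torch) load here,
#                                       # not when the package itself is imported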
| 176 | 1 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
UpperCAmelCase : Tuple = parser.parse_args()
UpperCAmelCase : List[Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCAmelCase : Any = CLIPImageProcessor()
UpperCAmelCase : Union[str, Any] = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
UpperCAmelCase : Dict = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
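
# Example invocation (the script name and output path are hypothetical placeholders):
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variation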
| 136 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = "T5Config"
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
| 136 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
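
# Worked example: "encoder.layers.0.attention.q_lin.weight" first becomes
# "encoder.layers.0.attn.q_proj.weight" via PATTERNS, then the encoder branch
# maps ".attn" to ".self_attn":
assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == "encoder.layers.0.self_attn.q_proj.weight"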
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 360 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an unavailable format type (or one of its aliases) is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Get a formatter object from its name or an alias."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
| 70 | 0 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first letter of `sentence`, leaving the rest unchanged."""
    if not sentence:
        return ""
    # Map every lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
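
# Usage sketch:
#   >>> capitalize("hello world")
#   'Hello world'
#   >>> capitalize("123 hello")
#   '123 hello'
#   >>> capitalize("")
#   ''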
if __name__ == "__main__":
from doctest import testmod
testmod()
| 313 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    r"""Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    r"""[`Constraint`] enforcing that an ordered sequence of tokens is included in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any(not isinstance(token_id, int) or token_id < 0 for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids

        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
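
# Usage sketch: stepping a PhrasalConstraint through its phrase token by token
# (the ids are arbitrary illustrative values).
def _phrasal_constraint_demo():
    constraint = PhrasalConstraint([5, 7, 9])
    for token_id in [5, 7, 9]:
        stepped, completed, reset = constraint.update(token_id)
        assert stepped and not reset
    assert completed and constraint.remaining() == 0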
class DisjunctiveTrie:
    r"""A simple trie over several candidate token sequences."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether the number of leaves differs from the number of words (i.e. some word is a subset of another)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    r"""A special [`Constraint`] that is fulfilled by fulfilling just one of several alternative token sequences."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any(not isinstance(token_id, int) or token_id < 0 for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
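
# Usage sketch: a DisjunctiveConstraint is fulfilled by completing any one of
# its branches (ids are arbitrary illustrative values).
def _disjunctive_constraint_demo():
    constraint = DisjunctiveConstraint([[1, 2], [1, 3, 4]])
    constraint.update(1)
    assert sorted(constraint.advance()) == [2, 3]  # either branch can continue
    constraint.update(2)  # finishes the shorter branch
    assert constraint.completed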
class ConstraintListState:
    r"""A class for beam scorers to track their progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".

                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
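
# Usage sketch: get_bank() scores a beam's progress, counting max_seqlen per
# completed constraint plus partial credit for the in-progress one.
def _constraint_list_state_demo():
    state = ConstraintListState([PhrasalConstraint([5, 7]), PhrasalConstraint([8])])
    assert state.get_bank() == 0
    state.add(5)  # half-way through the first phrase
    assert state.get_bank() == 1
    state.add(7)  # first phrase completed
    assert state.get_bank() == 2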
| 313 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
| 257 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 257 | 1 |
import math
import os
import sys
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = """"""
try:
with open(_SCREAMING_SNAKE_CASE , """rb""" ) as binary_file:
__a = binary_file.read()
for dat in data:
__a = f"{dat:08b}"
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def add_key_to_lexicon(
    lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str
) -> None:
    """Add the current string to the lexicon, widening the code width as needed."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        # the code width grows by one bit; left-pad every existing code
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress a bit string with an LZW-style growing lexicon."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix the compressed stream with the source file length (self-delimiting)."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes (with a stop marker) and write it out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Compress source_path and write the result to destination_path."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
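# Hand-traced sanity check of the core routine (not part of the original file):
# for the input "1101", compress_data emits "1" (first "1"), then "01" (the
# code for "10" in the grown lexicon), and pads the trailing "1" to "100",
# whose code is "01":
# >>> compress_data("1101")
# '10101'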
| 302 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 302 | 1 |
"""simple docstring"""
g = 9.80665  # standard gravity, m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force on a submerged body: F = rho * g * V."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
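    # Worked example: a fully submerged 0.5 m^3 body in fresh water
    # (rho = 1000 kg/m^3) displaces 1000 * 9.80665 * 0.5 = 4903.325 N.
    print(archimedes_principle(fluid_density=1000, volume=0.5))  # 4903.325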
| 355 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Remove (or keep only) the first `n_shave_prefix_segments` dot-separated segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map old (LDM) resnet parameter names to new (diffusers) names."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map old (LDM) attention parameter names to new (diffusers) names."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)
        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)
            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
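# Example invocation (hypothetical file names, for illustration only):
#   python convert_ldm_checkpoint.py --checkpoint_path model.ckpt \
#       --config_file config.json --dump_path ./converted-ldm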
| 341 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self) -> int:
        return len(self.src_lens)
    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))."""
    return list(map(f, x))


def pickle_save(obj, path):
    """Pickle obj to path."""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
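# Worked example (hand-checked, not from the original file): for
# f1_score("the cat", "a cat sat"), normalize_answer drops the articles,
# leaving ["cat"] vs ["cat", "sat"]; the overlap is 1 token, so
# precision = 1/1, recall = 1/2 and F1 = 2 * 1 * 0.5 / 1.5 = 2/3.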
| 33 |
"""simple docstring"""
def count_inversions_bf(arr):
    """Brute-force O(n^2) inversion count."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Divide-and-conquer O(n log n) inversion count; returns (sorted arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
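# Quick hand-checkable case beyond main(): [3, 1, 2] has exactly two
# inversions, (3, 1) and (3, 2), and both implementations agree:
# count_inversions_bf([3, 1, 2]) == 2
# count_inversions_recursive([3, 1, 2]) == ([1, 2, 3], 2)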
| 33 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 371 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, """r""", encoding="""utf-8""") as f:
    bleu_data = json.load(f)
@require_torch
class FSMTBleuScoreTest(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 234 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # the underlying model is not fully deterministic, so seed for reproducibility
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # same check through the keyword-argument call path (the second test in
        # the original file was an exact duplicate; the kwarg variant is assumed)
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 209 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    """Configuration class for GPT-NeoX models."""

    model_type = "gpt_neox"
    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration dictionary."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
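# Usage sketch (not from the original file): a valid rope_scaling dict passes
# validation, while an unknown scaling type raises at construction time.
# GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})   # ok
# GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})    # ValueError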
| 79 | 0 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
F'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
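# Additional hand-checkable identities: combinations(5, 2) == 10 and, by the
# symmetry of the binomial coefficient, combinations(52, 5) == combinations(52, 47).
assert combinations(5, 2) == 10
assert combinations(52, 5) == combinations(52, 47)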
| 322 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
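# Minimal usage sketch (assumes the restored names above; not from the original
# file): configure a ResNet backbone that exposes its last two stages.
# config = ResNetConfig(depths=[3, 4, 6, 3], out_features=["stage3", "stage4"])
# config.out_features  # -> ["stage3", "stage4"]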
| 322 | 1 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCamelCase_ = TypeVar('''T''')
def get_parent_position(position: int) -> int:
    """heap helper: index of the parent node"""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """heap helper: index of the left child"""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """heap helper: index of the right child"""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue keyed by integer weights (binary heap)."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)
    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None
    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected, weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes
    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
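# Usage sketch (hand-traced against the code above; not from the original
# file): a 3-cycle with edge weights 1, 2, 3. Note the update rule uses
# dist[node] + weight, so `dist` holds cumulative costs from the start node.
if __name__ == "__main__":
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1, 2, 1)
    graph.add_edge(2, 3, 2)
    graph.add_edge(1, 3, 3)
    dist, parent = prims_algo(graph)
    print(dist)    # {1: 0, 2: 1, 3: 3}
    print(parent)  # {1: None, 2: 1, 3: 1}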
| 244 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Run the same prompt through Stable Diffusion v1.1-v1.4 for comparison."""
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
@torch.no_grad()
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(SCREAMING_SNAKE_CASE_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase__ = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCamelCase__ = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCamelCase__ = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCamelCase__ = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
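# Hypothetical usage sketch for the comparison pipeline above (the loading call is
# assumed for illustration, not taken from this file):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     output = pipe("an astronaut riding a horse")  # one image per checkpoint v1.1-v1.4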
| 244 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # Attend-and-excite requires running a backward pass at inference time, and
    # there is no deterministic backward operator for pad, so deterministic
    # algorithms are disabled for these tests and re-enabled afterwards.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
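    # The dummy components above are deliberately tiny (32-dim hidden sizes, one layer
    # per block) so these fast tests can run on CPU in seconds; they are not meant to
    # produce meaningful images.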
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.floataa
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 357 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :str = ["ViTFeatureExtractor"]
lowercase__ :int = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
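# The _LazyModule registered above defers importing the torch/tf/flax submodules until
# one of their attributes is actually accessed, keeping the initial package import cheap.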
| 97 | 0 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, by keeping all other submodules intact."""

    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
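# Minimal usage sketch (the target module and replacement are assumed for illustration):
#
#     import some_package.some_module
#     with patch_submodule(some_package.some_module, "os.path.join", my_join):
#         ...  # code in some_package.some_module now sees my_join as os.path.join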
| 115 |
"""simple docstring"""
import re
def dna(dna: str) -> str:
    """Return the complementary strand of the given DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
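# Example calls (assuming the function above):
#     dna("ATCGA")  -> "TAGCT"
#     dna("GTAX")   -> raises ValueError("Invalid Strand")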
if __name__ == "__main__":
import doctest
doctest.testmod()
| 115 | 1 |
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Compute the winning cluster by comparing squared Euclidean distances."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # compare the fully accumulated distances only once the loop is done
        return 0 if d0 > d1 else 1

    def update(self, weights, sample, j, alpha):
        """Move the winning weight vector towards the sample."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
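# Note: update applies the standard Kohonen rule w <- w + alpha * (sample - w) to the
# winning cluster's weight vector only; get_winner selects that cluster by comparing
# the squared Euclidean distances of the sample to the two weight vectors.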
def main():
    # training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
| 213 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImgaImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
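    # Notes on the two properties above: gpu_provider caps ONNX Runtime's CUDA memory
    # arena at 15 GB and extends it only as requested, and gpu_options disables the
    # memory-pattern optimization, which otherwise pre-plans allocations per input shape.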
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 213 | 1 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,
        )
        return out
| 124 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
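# Usage sketch (assumed generator function; in practice this reader is usually reached
# through a higher-level from_generator entry point rather than instantiated directly):
#
#     def gen():
#         yield {"text": "hello"}
#
#     ds = GeneratorDatasetInputStream(gen).read()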
| 38 | 0 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
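# Worked example (assumed inputs): value = [60, 100, 120], weight = [10, 20, 30],
# capacity = 50. The value/weight ratios are [6.0, 5.0, 4.0], so items 0 and 1 are
# taken whole (value 160, remaining capacity 20) and 20/30 of item 2 adds 80:
#     fractional_knapsack([60, 100, 120], [10, 20, 30], 50) -> (240.0, [1, 1, 0.666...])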
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
# fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ = {"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
| 143 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetVaModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act="relu6", last_hidden_size=1280, classifier_dropout_prob=0.1, initializer_range=0.02, use_labels=True, is_training=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 72 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
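# The MAPPING table above translates fairseq parameter-name prefixes to the matching
# Hugging Face module paths; the "*" wildcard stands for the encoder layer index and
# is filled in from the fairseq name at load time (see recursively_load_weights below).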
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
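    # Route conv feature-extractor tensors to load_conv_layer; map everything else through the MAPPING table above.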
UpperCAmelCase : str = []
UpperCAmelCase : Optional[int] = fairseq_model.state_dict()
UpperCAmelCase : Dict = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCAmelCase : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase : int = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCAmelCase : Any = True
if "*" in mapped_key:
UpperCAmelCase : Any = name.split(UpperCAmelCase_ )[0].split('.' )[-2]
UpperCAmelCase : int = mapped_key.replace('*' , UpperCAmelCase_ )
if "weight_g" in name:
UpperCAmelCase : Optional[Any] = 'weight_g'
elif "weight_v" in name:
UpperCAmelCase : Optional[int] = 'weight_v'
elif "weight" in name:
UpperCAmelCase : Optional[Any] = 'weight'
elif "bias" in name:
UpperCAmelCase : str = 'bias'
else:
UpperCAmelCase : str = None
set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
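    # type_id 0 entries are a conv layer's weight/bias; type_id 2 entries are its norm parameters (only layer 0 when group norm is used).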
UpperCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1]
UpperCAmelCase : Optional[Any] = name.split('.' )
UpperCAmelCase : Optional[Any] = int(items[0] )
UpperCAmelCase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCAmelCase : Union[str, Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCAmelCase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCAmelCase : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCAmelCase : str = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
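    # Build a SEWConfig from the fairseq config; conv_feature_layers is stored as a string of (dim, kernel, stride) tuples.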
UpperCAmelCase : Optional[Any] = SEWConfig()
if is_finetuned:
UpperCAmelCase : List[str] = model.wav_encoder.wav_model.cfg
else:
UpperCAmelCase : Optional[Any] = model.cfg
UpperCAmelCase : str = fs_config.conv_bias
UpperCAmelCase : Optional[Any] = eval(fs_config.conv_feature_layers )
UpperCAmelCase : Optional[Any] = [x[0] for x in conv_layers]
UpperCAmelCase : str = [x[1] for x in conv_layers]
UpperCAmelCase : str = [x[2] for x in conv_layers]
UpperCAmelCase : Tuple = 'gelu'
UpperCAmelCase : List[str] = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
UpperCAmelCase : List[Any] = 0.0
UpperCAmelCase : Optional[int] = fs_config.activation_fn.name
UpperCAmelCase : Tuple = fs_config.encoder_embed_dim
UpperCAmelCase : List[str] = 0.02
UpperCAmelCase : Any = fs_config.encoder_ffn_embed_dim
UpperCAmelCase : Any = 1E-5
UpperCAmelCase : Any = fs_config.encoder_layerdrop
UpperCAmelCase : List[str] = fs_config.encoder_attention_heads
UpperCAmelCase : Union[str, Any] = fs_config.conv_pos_groups
UpperCAmelCase : str = fs_config.conv_pos
UpperCAmelCase : Union[str, Any] = len(UpperCAmelCase_ )
UpperCAmelCase : List[str] = fs_config.encoder_layers
UpperCAmelCase : Any = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
UpperCAmelCase : List[Any] = model.cfg
UpperCAmelCase : Tuple = fs_config.final_dropout
UpperCAmelCase : Tuple = fs_config.layerdrop
UpperCAmelCase : int = fs_config.activation_dropout
UpperCAmelCase : Union[str, Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
UpperCAmelCase : str = fs_config.attention_dropout
UpperCAmelCase : Optional[Any] = fs_config.dropout_input
UpperCAmelCase : Optional[int] = fs_config.dropout
UpperCAmelCase : str = fs_config.mask_channel_length
UpperCAmelCase : Optional[Any] = fs_config.mask_channel_prob
UpperCAmelCase : Any = fs_config.mask_length
UpperCAmelCase : int = fs_config.mask_prob
UpperCAmelCase : Optional[Any] = 'Wav2Vec2FeatureExtractor'
UpperCAmelCase : Tuple = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=True ):
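    # For fine-tuned checkpoints, override the fairseq data path so the task can locate the CTC dictionary.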
if is_finetuned:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
UpperCAmelCase : List[str] = SEWConfig.from_pretrained(UpperCAmelCase_ )
else:
UpperCAmelCase : List[Any] = convert_config(model[0] , UpperCAmelCase_ )
UpperCAmelCase : int = model[0].eval()
UpperCAmelCase : Tuple = True if config.feat_extract_norm == 'layer' else False
UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
if is_finetuned:
if dict_path:
UpperCAmelCase : Optional[Any] = Dictionary.load(UpperCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase : List[Any] = target_dict.pad_index
UpperCAmelCase : Optional[Any] = target_dict.bos_index
UpperCAmelCase : int = target_dict.pad_index
UpperCAmelCase : Tuple = target_dict.bos_index
UpperCAmelCase : int = target_dict.eos_index
UpperCAmelCase : Optional[int] = len(target_dict.symbols )
UpperCAmelCase : Union[str, Any] = os.path.join(UpperCAmelCase_ , 'vocab.json' )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase_ ) )
return
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = WavaVecaCTCTokenizer(
UpperCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCAmelCase_ , )
UpperCAmelCase : Union[str, Any] = WavaVecaProcessor(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
UpperCAmelCase : List[str] = SEWForCTC(UpperCAmelCase_ )
else:
UpperCAmelCase : Tuple = SEWModel(UpperCAmelCase_ )
feature_extractor.save_pretrained(UpperCAmelCase_ )
recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
hf_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowercase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 151 | 0 |
"""simple docstring"""
def count_divisors( n : int ):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution():
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 358 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _UpperCAmelCase :
def __init__( self : Optional[int] , _lowercase : Any , _lowercase : List[str]=14 , _lowercase : Dict=7 , _lowercase : Optional[int]=True , _lowercase : Optional[int]=True , _lowercase : Any=False , _lowercase : Any=True , _lowercase : List[str]=99 , _lowercase : int=32 , _lowercase : Union[str, Any]=4 , _lowercase : Dict=4 , _lowercase : List[Any]=4 , _lowercase : Dict=37 , _lowercase : Tuple="gelu" , _lowercase : Optional[int]=0.1 , _lowercase : Dict=0.1 , _lowercase : Union[str, Any]=5_12 , _lowercase : int=0.02 , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = rotary_dim
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = None
__UpperCAmelCase = vocab_size - 1
__UpperCAmelCase = vocab_size - 1
__UpperCAmelCase = vocab_size - 1
def a ( self : int ):
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_lowercase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def a ( self : str ):
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def a ( self : List[Any] , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : List[str] ):
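        # Decode with a key/value cache in two steps (all tokens but the last, then the last token) and compare the final logits to one full forward pass.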
__UpperCAmelCase = 20
__UpperCAmelCase = model_class_name(_lowercase )
__UpperCAmelCase = model.init_cache(input_ids.shape[0] , _lowercase )
__UpperCAmelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCAmelCase = model(
input_ids[:, :-1] , attention_mask=_lowercase , past_key_values=_lowercase , position_ids=_lowercase , )
__UpperCAmelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model(
input_ids[:, -1:] , attention_mask=_lowercase , past_key_values=outputs_cache.past_key_values , position_ids=_lowercase , )
__UpperCAmelCase = model(_lowercase )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def a ( self : List[Any] , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Optional[int] , _lowercase : Union[str, Any] ):
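        # Same cache consistency check, but with an attention mask padded out to the max decoder length.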
__UpperCAmelCase = 20
__UpperCAmelCase = model_class_name(_lowercase )
__UpperCAmelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCAmelCase = model.init_cache(input_ids.shape[0] , _lowercase )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCAmelCase = model(
input_ids[:, :-1] , attention_mask=_lowercase , past_key_values=_lowercase , position_ids=_lowercase , )
__UpperCAmelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_lowercase , position_ids=_lowercase , )
__UpperCAmelCase = model(_lowercase , attention_mask=_lowercase )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
a__ : List[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def a ( self : List[Any] ):
__UpperCAmelCase = FlaxGPTJModelTester(self )
def a ( self : Any ):
for model_class_name in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_lowercase , _lowercase , _lowercase , _lowercase )
def a ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_lowercase , _lowercase , _lowercase , _lowercase )
@tooslow
def a ( self : Tuple ):
__UpperCAmelCase = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
__UpperCAmelCase = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=_lowercase , truncation=_lowercase )
__UpperCAmelCase = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
__UpperCAmelCase = False
__UpperCAmelCase = model.config.eos_token_id
__UpperCAmelCase = jax.jit(model.generate )
__UpperCAmelCase = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCAmelCase = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
__UpperCAmelCase = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(_lowercase , _lowercase )
@is_pt_flax_cross_test
def a ( self : Tuple ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
__UpperCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase = getattr(_lowercase , _lowercase )
__UpperCAmelCase , __UpperCAmelCase = pt_inputs['''input_ids'''].shape
__UpperCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_lowercase ):
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = pt_model_class(_lowercase ).eval()
__UpperCAmelCase = model_class(_lowercase , dtype=jnp.floataa )
__UpperCAmelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _lowercase )
__UpperCAmelCase = fx_state
with torch.no_grad():
__UpperCAmelCase = pt_model(**_lowercase ).to_tuple()
__UpperCAmelCase = fx_model(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_lowercase )
__UpperCAmelCase = model_class.from_pretrained(_lowercase , from_pt=_lowercase )
__UpperCAmelCase = fx_model_loaded(**_lowercase ).to_tuple()
self.assertEqual(
len(_lowercase ) , len(_lowercase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def a ( self : Any ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
__UpperCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase = getattr(_lowercase , _lowercase )
__UpperCAmelCase = pt_model_class(_lowercase ).eval()
__UpperCAmelCase = model_class(_lowercase , dtype=jnp.floataa )
__UpperCAmelCase = load_flax_weights_in_pytorch_model(_lowercase , fx_model.params )
__UpperCAmelCase , __UpperCAmelCase = pt_inputs['''input_ids'''].shape
__UpperCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_lowercase ):
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 0
__UpperCAmelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCAmelCase = pt_model(**_lowercase ).to_tuple()
__UpperCAmelCase = fx_model(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_lowercase )
__UpperCAmelCase = pt_model_class.from_pretrained(_lowercase , from_flax=_lowercase )
with torch.no_grad():
__UpperCAmelCase = pt_model_loaded(**_lowercase ).to_tuple()
self.assertEqual(
len(_lowercase ) , len(_lowercase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def a ( self : Tuple ):
for model_class_name in self.all_model_classes:
__UpperCAmelCase = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
__UpperCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
| 86 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'longformer'
def __init__( self , lowercase = 512 , lowercase = 2 , lowercase = 1 , lowercase = 0 , lowercase = 2 , lowercase = 30_522 , lowercase = 768 , lowercase = 12 , lowercase = 12 , lowercase = 3_072 , lowercase = "gelu" , lowercase = 0.1 , lowercase = 0.1 , lowercase = 512 , lowercase = 2 , lowercase = 0.02 , lowercase = 1e-12 , lowercase = False , **lowercase , ) -> Optional[int]:
super().__init__(pad_token_id=lowercase , **lowercase )
lowerCAmelCase = attention_window
lowerCAmelCase = sep_token_id
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = onnx_export
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase , lowercase = "default" , lowercase = None ) -> Tuple:
super().__init__(lowercase , lowercase , lowercase )
lowerCAmelCase = True
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
lowerCAmelCase = super().outputs
if self.task == "default":
lowerCAmelCase = {0: """batch"""}
return outputs
@property
def _snake_case ( self ) -> float:
return 1e-4
@property
def _snake_case ( self ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def _snake_case ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]:
lowerCAmelCase = super().generate_dummy_inputs(
preprocessor=lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCAmelCase = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
lowerCAmelCase = 1
return inputs
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 23 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
_a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ):
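    # Ceil-divide each dimension by scale_factor**2, then re-multiply by scale_factor so the result matches the movq latent grid.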
UpperCAmelCase_ : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : Tuple = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
if latents is None:
UpperCAmelCase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase_ : str = latents.to(lowercase_ )
UpperCAmelCase_ : Dict = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
UpperCAmelCase_ : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase_ : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : str = self._execution_device
UpperCAmelCase_ : List[Any] = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase_ : List[Any] = self.scheduler.timesteps
UpperCAmelCase_ : List[str] = self.unet.config.in_channels
UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase_ : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds}
UpperCAmelCase_ : Optional[Any] = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 )
UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5
UpperCAmelCase_ : int = image.clamp(0 , 1 )
UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 23 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def __lowerCAmelCase ( snake_case__ , snake_case__=False , snake_case__=False ):
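    # Collect (old_name, new_name) pairs for every encoder block plus the embeddings; keys differ for LM-head vs. classification checkpoints.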
__UpperCamelCase : List[str] = "backbone." if is_semantic else ""
__UpperCamelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", "beit.embeddings.cls_token"),
(F"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
(F"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
(F"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__=False , snake_case__=False ):
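    # Split each block's fused qkv projection into q/k/v slices; BEiT learns q and v biases but no k bias.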
for i in range(config.num_hidden_layers ):
__UpperCamelCase : Any = "backbone." if is_semantic else ""
# queries, keys and values
__UpperCamelCase : str = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
__UpperCamelCase : Dict = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
__UpperCamelCase : List[Any] = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
__UpperCamelCase : List[Any] = in_proj_weight[
: config.hidden_size, :
]
__UpperCamelCase : Union[str, Any] = q_bias
__UpperCamelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__UpperCamelCase : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
__UpperCamelCase : List[Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
__UpperCamelCase : int = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
__UpperCamelCase : Optional[int] = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
__UpperCamelCase : str = gamma_a
__UpperCamelCase : str = gamma_a
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : List[Any] = dct.pop(snake_case__ )
__UpperCamelCase : Optional[Any] = val
def __lowerCAmelCase ( ):
__UpperCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCamelCase : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__=False ):
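    # RVL-CDIP checkpoints are 16-class document classifiers; all other DiT checkpoints keep the masked-image-modeling head.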
    __UpperCamelCase : Any = "rvlcdip" not in checkpoint_url
__UpperCamelCase : Optional[Any] = BeitConfig(use_absolute_position_embeddings=snake_case__ , use_mask_token=snake_case__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__UpperCamelCase : Union[str, Any] = 1_024
__UpperCamelCase : Optional[Any] = 4_096
__UpperCamelCase : Any = 24
__UpperCamelCase : Any = 16
# labels
if "rvlcdip" in checkpoint_url:
__UpperCamelCase : Optional[Any] = 16
__UpperCamelCase : Optional[int] = "huggingface/label-files"
__UpperCamelCase : List[str] = "rvlcdip-id2label.json"
__UpperCamelCase : Optional[int] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) )
__UpperCamelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCamelCase : int = idalabel
__UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__UpperCamelCase : str = torch.hub.load_state_dict_from_url(snake_case__ , map_location="cpu" )["model"]
__UpperCamelCase : Optional[Any] = create_rename_keys(snake_case__ , has_lm_head=snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , has_lm_head=snake_case__ )
# load HuggingFace model
__UpperCamelCase : Union[str, Any] = BeitForMaskedImageModeling(snake_case__ ) if has_lm_head else BeitForImageClassification(snake_case__ )
model.eval()
model.load_state_dict(snake_case__ )
# Check outputs on an image
__UpperCamelCase : List[Any] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=snake_case__ )
__UpperCamelCase : int = prepare_img()
__UpperCamelCase : Optional[Any] = image_processor(images=snake_case__ , return_tensors="pt" )
__UpperCamelCase : Tuple = encoding["pixel_values"]
__UpperCamelCase : Any = model(snake_case__ )
__UpperCamelCase : Optional[Any] = outputs.logits
# verify logits
__UpperCamelCase : List[str] = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(snake_case__ ), "Shape of logits not as expected"
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
if has_lm_head:
__UpperCamelCase : Tuple = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
__UpperCamelCase : Optional[Any] = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=snake_case__ , )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=snake_case__ , )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
_lowerCAmelCase = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 298 |
"""simple docstring"""
def selection_sort( collection : list ) -> list:
    """simple docstring"""
    snake_case : int = len(collection )
    for i in range(snake_case - 1 ):
        least : int = i
        for k in range(i + 1 , snake_case ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
| 203 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Tuple = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Dict = """segformer"""
def __init__( self : str , snake_case_ : Optional[int]=3 , snake_case_ : Any=4 , snake_case_ : Union[str, Any]=[2, 2, 2, 2] , snake_case_ : List[str]=[8, 4, 2, 1] , snake_case_ : Tuple=[32, 64, 160, 256] , snake_case_ : List[Any]=[7, 3, 3, 3] , snake_case_ : Dict=[4, 2, 2, 2] , snake_case_ : Union[str, Any]=[1, 2, 5, 8] , snake_case_ : Union[str, Any]=[4, 4, 4, 4] , snake_case_ : Any="gelu" , snake_case_ : Any=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : List[Any]=0.1 , snake_case_ : str=0.02 , snake_case_ : int=0.1 , snake_case_ : Optional[Any]=1e-6 , snake_case_ : List[Any]=256 , snake_case_ : Union[str, Any]=255 , **snake_case_ : List[Any] , ):
super().__init__(**snake_case_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , snake_case_ , )
UpperCamelCase_: List[Any] = num_channels
UpperCamelCase_: Union[str, Any] = num_encoder_blocks
UpperCamelCase_: Any = depths
UpperCamelCase_: Optional[Any] = sr_ratios
UpperCamelCase_: str = hidden_sizes
UpperCamelCase_: List[str] = patch_sizes
UpperCamelCase_: str = strides
UpperCamelCase_: Any = mlp_ratios
UpperCamelCase_: Any = num_attention_heads
UpperCamelCase_: Optional[int] = hidden_act
UpperCamelCase_: List[Any] = hidden_dropout_prob
UpperCamelCase_: List[Any] = attention_probs_dropout_prob
UpperCamelCase_: str = classifier_dropout_prob
UpperCamelCase_: int = initializer_range
UpperCamelCase_: Union[str, Any] = drop_path_rate
UpperCamelCase_: Tuple = layer_norm_eps
UpperCamelCase_: Dict = decoder_hidden_size
UpperCamelCase_: Optional[int] = kwargs.get("""reshape_last_stage""" , snake_case_ )
UpperCamelCase_: Dict = semantic_loss_ignore_index
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : int = version.parse("""1.11""" )
@property
def lowerCAmelCase__ ( self : int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self : str ):
return 1e-4
@property
def lowerCAmelCase__ ( self : str ):
return 12
| 223 |
def A__ ( txt : str ) -> list:
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 223 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : List[Any]=7 , __lowercase : List[str]=3 , __lowercase : List[str]=30 , __lowercase : Optional[int]=400 , __lowercase : Optional[int]=True , __lowercase : Tuple=None , __lowercase : str=True , __lowercase : Optional[int]=1 / 255 , __lowercase : Any=True , __lowercase : List[str]=[0.5, 0.5, 0.5] , __lowercase : str=[0.5, 0.5, 0.5] , __lowercase : List[Any]=True , ):
"""simple docstring"""
__lowercase =size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__lowercase =parent
__lowercase =batch_size
__lowercase =num_channels
__lowercase =min_resolution
__lowercase =max_resolution
__lowercase =do_resize
__lowercase =size
__lowercase =do_rescale
__lowercase =rescale_factor
__lowercase =do_normalize
__lowercase =image_mean
__lowercase =image_std
__lowercase =do_pad
def snake_case ( self : List[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def snake_case ( self : Dict , __lowercase : Any , __lowercase : Any=False ):
"""simple docstring"""
if not batched:
__lowercase =image_inputs[0]
if isinstance(__lowercase , Image.Image ):
__lowercase , __lowercase =image.size
else:
__lowercase , __lowercase =image.shape[1], image.shape[2]
if w < h:
__lowercase =int(self.size['shortest_edge'] * h / w )
__lowercase =self.size['shortest_edge']
elif w > h:
__lowercase =self.size['shortest_edge']
__lowercase =int(self.size['shortest_edge'] * w / h )
else:
__lowercase =self.size['shortest_edge']
__lowercase =self.size['shortest_edge']
else:
__lowercase =[]
for image in image_inputs:
__lowercase , __lowercase =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            __lowercase =max(__lowercase , key=lambda item : item[0] )[0]
            __lowercase =max(__lowercase , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase ( A , unittest.TestCase ):
lowerCAmelCase_ = DetrImageProcessor if is_vision_available() else None
def snake_case ( self : Tuple ):
"""simple docstring"""
__lowercase =DetrImageProcessingTester(self )
@property
def snake_case ( self : str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : str ):
"""simple docstring"""
__lowercase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , 'image_mean' ) )
self.assertTrue(hasattr(__lowercase , 'image_std' ) )
self.assertTrue(hasattr(__lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(__lowercase , 'do_rescale' ) )
self.assertTrue(hasattr(__lowercase , 'rescale_factor' ) )
self.assertTrue(hasattr(__lowercase , 'do_resize' ) )
self.assertTrue(hasattr(__lowercase , 'size' ) )
self.assertTrue(hasattr(__lowercase , 'do_pad' ) )
def snake_case ( self : List[str] ):
"""simple docstring"""
__lowercase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , __lowercase )
__lowercase =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowercase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , __lowercase )
def snake_case ( self : Tuple ):
"""simple docstring"""
pass
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
__lowercase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
__lowercase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
__lowercase =image_processing(__lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self : int ):
"""simple docstring"""
__lowercase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
__lowercase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase =image_processing(__lowercase , return_tensors='pt' ).pixel_values
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self : List[str] ):
"""simple docstring"""
__lowercase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
__lowercase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase =image_processing(__lowercase , return_tensors='pt' ).pixel_values
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case ( self : Tuple ):
"""simple docstring"""
__lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowercase =json.loads(f.read() )
__lowercase ={'image_id': 39769, 'annotations': target}
# encode them
__lowercase =DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowercase =image_processing(images=__lowercase , annotations=__lowercase , return_tensors='pt' )
# verify pixel values
__lowercase =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __lowercase )
__lowercase =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __lowercase , atol=1E-4 ) )
# verify area
__lowercase =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __lowercase ) )
# verify boxes
__lowercase =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __lowercase )
__lowercase =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __lowercase , atol=1E-3 ) )
# verify image_id
__lowercase =torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __lowercase ) )
# verify is_crowd
__lowercase =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __lowercase ) )
# verify class_labels
__lowercase =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __lowercase ) )
# verify orig_size
__lowercase =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __lowercase ) )
# verify size
__lowercase =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __lowercase ) )
@slow
def snake_case ( self : List[Any] ):
"""simple docstring"""
__lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowercase =json.loads(f.read() )
__lowercase ={'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
__lowercase =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowercase =DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowercase =image_processing(images=__lowercase , annotations=__lowercase , masks_path=__lowercase , return_tensors='pt' )
# verify pixel values
__lowercase =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , __lowercase )
__lowercase =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __lowercase , atol=1E-4 ) )
# verify area
__lowercase =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __lowercase ) )
# verify boxes
__lowercase =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __lowercase )
__lowercase =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __lowercase , atol=1E-3 ) )
# verify image_id
__lowercase =torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __lowercase ) )
# verify is_crowd
__lowercase =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __lowercase ) )
# verify class_labels
__lowercase =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __lowercase ) )
# verify masks
__lowercase =822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __lowercase )
# verify orig_size
__lowercase =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __lowercase ) )
# verify size
__lowercase =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __lowercase ) )
| 141 |
'''simple docstring'''
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them low-resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the reference labels."""
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        """Return the simple accuracy of the predictions."""
        return {"accuracy": simple_accuracy(predictions, references)}
| 141 | 1 |
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
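

# Closed-form sketch (my own addition): using the identities
# sum(1..n) = n*(n+1)/2 and sum(i*i, 1..n) = n*(n+1)*(2n+1)/6, the same
# answer can be computed in O(1) without the loops above.
def solution_closed_form(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares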
if __name__ == "__main__":
print(F"""{solution() = }""")
| 255 |
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_5_5 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , lowerCAmelCase__ = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , lowerCAmelCase__ = True , lowerCAmelCase__=7 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=3 , ):
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 2_8_8}
__SCREAMING_SNAKE_CASE = size_divisor
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = image_mean
__SCREAMING_SNAKE_CASE = image_std
__SCREAMING_SNAKE_CASE = do_pad
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
def snake_case_ ( self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=False):
if not batched:
__SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
__SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = image.size
else:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
__SCREAMING_SNAKE_CASE = size / min(lowerCAmelCase__ , lowerCAmelCase__)
if h < w:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = size, scale * w
else:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = scale * h, size
__SCREAMING_SNAKE_CASE = int((1_3_3_3 / 8_0_0) * size)
if max(lowerCAmelCase__ , lowerCAmelCase__) > max_size:
__SCREAMING_SNAKE_CASE = max_size / max(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = newh * scale
__SCREAMING_SNAKE_CASE = neww * scale
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = int(newh + 0.5), int(neww + 0.5)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__SCREAMING_SNAKE_CASE = []
for image in image_inputs:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
__SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__: item[0])[0]
__SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Tuple = BridgeTowerImageProcessor if is_vision_available() else None
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = BridgeTowerImageProcessingTester(self)
@property
def snake_case_ ( self):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean"""))
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std"""))
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize"""))
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize"""))
self.assertTrue(hasattr(lowerCAmelCase__ , """size"""))
self.assertTrue(hasattr(lowerCAmelCase__ , """size_divisor"""))
def snake_case_ ( self):
pass
def snake_case_ ( self):
# Initialize image processor
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image)
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""").pixel_values
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self):
# Initialize image processor
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray)
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""").pixel_values
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case_ ( self):
# Initialize image processor
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor)
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""").pixel_values
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 255 | 1 |
from timeit import timeit
test_data = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    n = len(s)
    # We only need to traverse to the midpoint of the string, because the i-th
    # character from the start is compared with the i-th character from the end
    # via s[i] == s[n - i - 1], where n is the length of the string.
    # e.g. in [0, 1, 2, 3, 4, 5], index 4 is compared with index 1 (i == n - i - 1).
    return all(s[i] == s[n - i - 1] for i in range(n // 2))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 50 |
'''simple docstring'''
import heapq
import sys
import numpy as np
UpperCamelCase = tuple[int, int]
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> str:
'''simple docstring'''
A: Any = []
A: int = set()
def _snake_case ( self : Optional[Any] ) -> int:
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def _snake_case ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return len(self.elements ) == 0
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]:
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(SCREAMING_SNAKE_CASE_ )
else:
# update
# print("update", item)
A: Optional[int] = []
((A) , (A)): str = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((A) , (A)): int = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> Any:
'''simple docstring'''
if item in self.set:
self.set.remove(SCREAMING_SNAKE_CASE_ )
A: str = []
((A) , (A)): List[str] = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((A) , (A)): Any = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return self.elements[0][1]
def _snake_case ( self : int ) -> Union[str, Any]:
'''simple docstring'''
((A) , (A)): Dict = heapq.heappop(self.elements )
self.set.remove(SCREAMING_SNAKE_CASE_ )
return (priority, item)
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
# euclidean distance
A: List[str] = np.array(__lowercase )
A: Optional[int] = np.array(__lowercase )
return np.linalg.norm(a - b )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int:
# integer division by time variable
return consistent_heuristic(__lowercase , __lowercase ) // t
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]:
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
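# Worked illustration (standalone numbers, my own): for p = (0, 0) and
# goal = (19, 19) on the 20x20 grid below, the Euclidean heuristic gives
# sqrt(19**2 + 19**2) ~= 26.87 and the Manhattan heuristic gives 38; the
# second heuristic divides the Euclidean value by the global counter t, so
# its estimates change while the search runs, which makes it inconsistent.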
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
A: int = g_function[start] + Wa * heuristics[i](__lowercase , __lowercase )
return ans
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[int]:
A: Union[str, Any] = np.chararray((n, n) )
for i in range(__lowercase ):
for j in range(__lowercase ):
A: Union[str, Any] = '''*'''
for i in range(__lowercase ):
for j in range(__lowercase ):
if (j, (n - 1) - i) in blocks:
A: Optional[Any] = '''#'''
A: Tuple = '''-'''
A: List[str] = back_pointer[goal]
while x != start:
((A) , (A)): Tuple = x
# print(x)
A: List[str] = '''-'''
A: str = back_pointer[x]
A: Dict = '''-'''
for i in range(__lowercase ):
for j in range(__lowercase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
A: List[str] = back_pointer[goal]
while x != start:
print(__lowercase , end=''' ''' )
A: Optional[int] = back_pointer[x]
print(__lowercase )
sys.exit()
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]:
for itera in range(__lowercase ):
open_list[itera].remove_element(__lowercase )
# print("s", s)
# print("j", j)
((A) , (A)): Tuple = s
A: Optional[Any] = (x - 1, y)
A: str = (x + 1, y)
A: List[Any] = (x, y + 1)
A: int = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(__lowercase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(__lowercase )
A: int = -1
A: int = float('''inf''' )
if valid(__lowercase ) and g_function[neighbours] > g_function[s] + 1:
A: List[str] = g_function[s] + 1
A: List[str] = s
if neighbours not in close_list_anchor:
open_list[0].put(__lowercase , key(__lowercase , 0 , __lowercase , __lowercase ) )
if neighbours not in close_list_inad:
for var in range(1 , __lowercase ):
if key(__lowercase , __lowercase , __lowercase , __lowercase ) <= Wa * key(
__lowercase , 0 , __lowercase , __lowercase ):
open_list[j].put(
__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) )
def SCREAMING_SNAKE_CASE( ) -> Tuple:
A: str = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
UpperCamelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
UpperCamelCase = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
UpperCamelCase = make_common_ground()
UpperCamelCase = blocks_blk
# hyper parameters
UpperCamelCase = 1
UpperCamelCase = 1
UpperCamelCase = 20
UpperCamelCase = 3 # one consistent and two other inconsistent
# start and end destination
UpperCamelCase = (0, 0)
UpperCamelCase = (n - 1, n - 1)
UpperCamelCase = 1
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int:
A: int = {start: 0, goal: float('''inf''' )}
A: Union[str, Any] = {start: -1, goal: -1}
A: List[Any] = []
A: Union[str, Any] = set()
for i in range(__lowercase ):
open_list.append(PriorityQueue() )
open_list[i].put(__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) )
A: list[int] = []
A: list[int] = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , __lowercase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(__lowercase , __lowercase , __lowercase )
else:
A , A: Union[str, Any] = open_list[i].top_show()
visited.add(__lowercase )
expand_state(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
close_list_inad.append(__lowercase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(__lowercase , __lowercase , __lowercase )
else:
A: Union[str, Any] = open_list[0].top_show()
visited.add(__lowercase )
expand_state(
__lowercase , 0 , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
close_list_anchor.append(__lowercase )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(__lowercase ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 319 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30522, type=int)
    args = parser.parse_args()
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    # one slot per vocabulary id; ids never seen keep a count of 0
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 112 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30522, type=int)
    args = parser.parse_args()
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    # one slot per vocabulary id; ids never seen keep a count of 0
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 112 | 1 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 15 |
from PIL import Image
def change_contrast(img: Image.Image, level: int) -> Image.Image:
    """Return a copy of `img` with its contrast adjusted by `level`."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 285 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : torch.FloatTensor
_UpperCamelCase : Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
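# Quick sanity sketch (my own check, not part of the scheduler file): with the
# "cosine" transform, alpha_bar(t) = cos(((t + 0.008) / 1.008) * pi / 2) ** 2
# decreases monotonically on [0, 1], so every beta is positive and the
# min(..., max_beta) clamp keeps it at or below 0.999:
#     betas = betas_for_alpha_bar(1000)
#     assert 0 < float(betas.min()) and float(betas.max()) <= 0.999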
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase ):
@register_to_config
def __init__( self : int , a : int = 1_000 , a : str = "fixed_small_log" , a : bool = True , a : Optional[float] = 1.0 , a : str = "epsilon" , a : str = "squaredcos_cap_v2" , )-> str:
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
lowercase__ = betas_for_alpha_bar(a )
lowercase__ = 1.0 - self.betas
lowercase__ = torch.cumprod(self.alphas , dim=0 )
lowercase__ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
lowercase__ = 1.0
# setable values
lowercase__ = None
lowercase__ = torch.from_numpy(np.arange(0 , a )[::-1].copy() )
lowercase__ = variance_type
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : torch.FloatTensor , a : Optional[int] = None )-> torch.FloatTensor:
"""simple docstring"""
return sample
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : int , a : Union[str, torch.device] = None )-> Tuple:
"""simple docstring"""
lowercase__ = num_inference_steps
lowercase__ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
lowercase__ = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(np.intaa )
lowercase__ = torch.from_numpy(a ).to(a )
def SCREAMING_SNAKE_CASE_ ( self : str , a : List[str] , a : List[str]=None , a : Optional[int]=None , a : str=None )-> Dict:
"""simple docstring"""
if prev_timestep is None:
lowercase__ = t - 1
lowercase__ = self.alphas_cumprod[t]
lowercase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowercase__ = self.betas[t]
else:
lowercase__ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowercase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowercase__ = torch.log(torch.clamp(a , min=1E-2_0 ) )
lowercase__ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowercase__ = variance.log()
lowercase__ = beta.log()
lowercase__ = (predicted_variance + 1) / 2
lowercase__ = frac * max_log + (1 - frac) * min_log
return variance
def SCREAMING_SNAKE_CASE_ ( self : Any , a : torch.FloatTensor , a : int , a : torch.FloatTensor , a : Optional[int] = None , a : Optional[Any]=None , a : bool = True , )-> Union[UnCLIPSchedulerOutput, Tuple]:
"""simple docstring"""
lowercase__ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowercase__ , lowercase__ = torch.split(a , sample.shape[1] , dim=1 )
else:
lowercase__ = None
# 1. compute alphas, betas
if prev_timestep is None:
lowercase__ = t - 1
lowercase__ = self.alphas_cumprod[t]
lowercase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowercase__ = self.betas[t]
lowercase__ = self.alphas[t]
else:
lowercase__ = 1 - alpha_prod_t / alpha_prod_t_prev
lowercase__ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
' for the UnCLIPScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ = torch.clamp(
a , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowercase__ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
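        # In DDPM notation this is the posterior mean
        #     mu_t(x_t, x_0) = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
        #                    + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t,
        # i.e. formula (7) of the DDPM paper linked above.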
# 6. Add noise
lowercase__ = 0
if t > 0:
lowercase__ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=a , device=model_output.device )
lowercase__ = self._get_variance(
a , predicted_variance=a , prev_timestep=a , )
if self.variance_type == "fixed_small_log":
lowercase__ = variance
elif self.variance_type == "learned_range":
lowercase__ = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
' for the UnCLIPScheduler.' )
lowercase__ = variance * variance_noise
lowercase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=a , pred_original_sample=a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.IntTensor , )-> torch.FloatTensor:
"""simple docstring"""
lowercase__ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = alphas_cumprod[timesteps] ** 0.5
lowercase__ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
lowercase__ = sqrt_alpha_prod.unsqueeze(-1 )
lowercase__ = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase__ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
lowercase__ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
lowercase__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
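        # Standalone sketch of the closed-form forward process used above (my
        # own check): x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
        # so independent unit-variance x_0 and eps keep the noisy sample at
        # (roughly) unit variance for every timestep:
        #     a = torch.tensor(0.3)  # any alpha_bar_t in (0, 1)
        #     x0, eps = torch.randn(10_000), torch.randn(10_000)
        #     xt = a.sqrt() * x0 + (1 - a).sqrt() * eps
        #     assert abs(xt.var().item() - 1.0) < 0.1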
| 368 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any:
lowercase__ = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('Quantized models are not supported.' )
lowercase__ = re.match(R'^mobilenet_v1_([^_]*)_([^_]*)$' , _SCREAMING_SNAKE_CASE )
if matches:
lowercase__ = float(matches[1] )
lowercase__ = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
lowercase__ = 1001
lowercase__ = 'imagenet-1k-id2label.json'
lowercase__ = 'huggingface/label-files'
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase__ = {int(_SCREAMING_SNAKE_CASE ) + 1: v for k, v in idalabel.items()}
lowercase__ = 'background'
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
return config
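# Tiny illustration (my own example) of the checkpoint-name parsing above:
#     m = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_0.75_192")
#     float(m.group(1)), int(m.group(2))  # -> (0.75, 192)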
def __UpperCamelCase () -> int:
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
lowercase__ = get_mobilenet_va_config(_SCREAMING_SNAKE_CASE )
# Load 🤗 model
lowercase__ = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
lowercase__ = MobileNetVaImageProcessor(
crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
lowercase__ = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase__ = model(**_SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
lowercase__ = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
lowercase__ = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
lowercase__ = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print('Pushing to the hub...' )
lowercase__ = 'google/' + model_name
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 269 | 0 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : str=[] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = size[0] - overlap_pixels * 2
UpperCAmelCase_ = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
UpperCAmelCase_ = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
UpperCAmelCase_ = np.pad(lowerCAmelCase__ , mode="linear_ramp" , pad_width=lowerCAmelCase__ , end_values=0 )
if "l" in remove_borders:
UpperCAmelCase_ = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
UpperCAmelCase_ = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
UpperCAmelCase_ = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
UpperCAmelCase_ = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Any , snake_case_ : int ) -> List[str]:
'''simple docstring'''
return max(lowerCAmelCase__ , min(lowerCAmelCase__ , lowerCAmelCase__ ) )
def lowerCAmelCase_ ( snake_case_ : [int] , snake_case_ : [int] , snake_case_ : [int] ) -> Union[str, Any]:
'''simple docstring'''
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def lowerCAmelCase_ ( snake_case_ : [int] , snake_case_ : int , snake_case_ : [int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = list(lowerCAmelCase__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
UpperCAmelCase_ = clamp_rect(lowerCAmelCase__ , [0, 0] , [image_size[0], image_size[1]] )
return rect
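# Worked illustration (numbers are my own): expanding the tile rectangle
# (10, 10, 20, 20) by an overlap of 4 inside a 64x64 image gives (6, 6, 24, 24),
# while (0, 0, 10, 10) becomes (0, 0, 14, 14) because the clamp keeps the
# expanded rectangle inside the image bounds.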
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(lowerCAmelCase__ , (original_slice, 0) )
return result
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
UpperCAmelCase_ = tile.crop(lowerCAmelCase__ )
return tile
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = n % d
return n - divisor
class __A ( __SCREAMING_SNAKE_CASE ):
def __init__(self : int , __a : Union[str, Any] , __a : Any , __a : List[Any] , __a : List[str] , __a : Tuple , __a : Optional[int] , __a : Any = 350 , ):
super().__init__(
vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , max_noise_level=__UpperCAmelCase , )
def _lowercase (self : Union[str, Any] , __a : Any , __a : Optional[int] , __a : List[Any] , __a : Any , __a : List[str] , __a : Optional[int] , __a : str , **__a : List[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
UpperCAmelCase_ = add_overlap_rect(__UpperCAmelCase , __UpperCAmelCase , image.size )
UpperCAmelCase_ = image.crop(__UpperCAmelCase )
UpperCAmelCase_ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
UpperCAmelCase_ = translated_slice_x - (original_image_slice / 2)
UpperCAmelCase_ = max(0 , __UpperCAmelCase )
UpperCAmelCase_ = squeeze_tile(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_ = to_input.size
UpperCAmelCase_ = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
UpperCAmelCase_ = super(__UpperCAmelCase , self ).__call__(image=__UpperCAmelCase , **__UpperCAmelCase ).images[0]
UpperCAmelCase_ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
UpperCAmelCase_ = unsqueeze_tile(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
UpperCAmelCase_ = []
if x == 0:
remove_borders.append("l" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("r" )
if y == 0:
remove_borders.append("t" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("b" )
UpperCAmelCase_ = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__UpperCAmelCase ) , mode="L" , )
final_image.paste(
__UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __UpperCAmelCase )
@torch.no_grad()
def __call__(self : Optional[int] , __a : Optional[int] , __a : Dict , __a : List[str] = 75 , __a : Union[str, Any] = 9.0 , __a : Any = 50 , __a : str = None , __a : List[Any] = 1 , __a : Dict = 0.0 , __a : Dict = None , __a : Optional[int] = None , __a : Optional[Any] = None , __a : str = 1 , __a : int = 128 , __a : Tuple = 32 , __a : Union[str, Any] = 32 , ):
UpperCAmelCase_ = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
UpperCAmelCase_ = math.ceil(image.size[0] / tile_size )
UpperCAmelCase_ = math.ceil(image.size[1] / tile_size )
UpperCAmelCase_ = tcx * tcy
UpperCAmelCase_ = 0
for y in range(__UpperCAmelCase ):
for x in range(__UpperCAmelCase ):
self._process_tile(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , prompt=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , noise_level=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({"progress": current_count / total_tile_count, "image": final_image} )
return final_image
def lowerCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = """stabilityai/stable-diffusion-x4-upscaler"""
UpperCAmelCase_ = StableDiffusionTiledUpscalePipeline.from_pretrained(lowerCAmelCase__ , revision="fp16" , torch_dtype=torch.floataa )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = Image.open("../../docs/source/imgs/diffusers_library.jpg" )
def callback(snake_case_ : Optional[int] ):
print(f"""progress: {obj["progress"]:.4f}""" )
obj["image"].save("diffusers_library_progress.jpg" )
UpperCAmelCase_ = pipe(image=lowerCAmelCase__ , prompt="Black font, white background, vector" , noise_level=40 , callback=lowerCAmelCase__ )
final_image.save("diffusers_library.jpg" )
if __name__ == "__main__":
main()
| 1 |
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : tuple , lowerCAmelCase__ : Path , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int]=False , ):
"""simple docstring"""
output_path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , enable_onnx_checker=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , )
else:
export(
lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , )
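# Quick illustration (my own example) of the version gate defined above:
#     from packaging import version
#     version.parse("1.10.2") < version.parse("1.11")  # True  -> legacy export arguments
#     version.parse("1.13.0") < version.parse("1.11")  # False -> modern signature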
@torch.no_grad()
def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
__UpperCAmelCase : Tuple = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__UpperCAmelCase : Optional[int] = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
__UpperCAmelCase : Dict = """cpu"""
__UpperCAmelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ , torch_dtype=lowerCAmelCase__ ).to(lowerCAmelCase__ )
__UpperCAmelCase : List[str] = Path(lowerCAmelCase__ )
# TEXT ENCODER
__UpperCAmelCase : Any = pipeline.text_encoder.config.max_position_embeddings
__UpperCAmelCase : str = pipeline.text_encoder.config.hidden_size
__UpperCAmelCase : Optional[Any] = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors="""pt""" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowerCAmelCase__ , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} , opset=lowerCAmelCase__ , )
del pipeline.text_encoder
# UNET
__UpperCAmelCase : Optional[int] = pipeline.unet.config.in_channels
__UpperCAmelCase : Tuple = pipeline.unet.config.sample_size
__UpperCAmelCase : Dict = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
torch.randn(2 ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
torch.randn(2 , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=lowerCAmelCase__ , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} , opset=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , )
__UpperCAmelCase : Any = str(unet_path.absolute().as_posix() )
__UpperCAmelCase : int = os.path.dirname(lowerCAmelCase__ )
__UpperCAmelCase : Tuple = onnx.load(lowerCAmelCase__ )
# clean up existing tensor files
shutil.rmtree(lowerCAmelCase__ )
os.mkdir(lowerCAmelCase__ )
# collate external tensor files into one
onnx.save_model(
lowerCAmelCase__ , lowerCAmelCase__ , save_as_external_data=lowerCAmelCase__ , all_tensors_to_one_file=lowerCAmelCase__ , location="""weights.pb""" , convert_attribute=lowerCAmelCase__ , )
del pipeline.unet
# VAE ENCODER
__UpperCAmelCase : Union[str, Any] = pipeline.vae
__UpperCAmelCase : str = vae_encoder.config.in_channels
__UpperCAmelCase : Any = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
__UpperCAmelCase : str = lambda lowerCAmelCase__ , lowerCAmelCase__ : vae_encoder.encode(lowerCAmelCase__ , lowerCAmelCase__ )[0].sample()
onnx_export(
lowerCAmelCase__ , model_args=(
torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=lowerCAmelCase__ , )
# VAE DECODER
__UpperCAmelCase : Optional[Any] = pipeline.vae
__UpperCAmelCase : Optional[int] = vae_decoder.config.latent_channels
__UpperCAmelCase : Dict = vae_decoder.config.out_channels
# forward only through the decoder part
__UpperCAmelCase : List[Any] = vae_encoder.decode
onnx_export(
lowerCAmelCase__ , model_args=(
torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=lowerCAmelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
__UpperCAmelCase : Tuple = pipeline.safety_checker
__UpperCAmelCase : Union[str, Any] = safety_checker.config.vision_config.num_channels
__UpperCAmelCase : Any = safety_checker.config.vision_config.image_size
__UpperCAmelCase : Optional[int] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=lowerCAmelCase__ , )
del pipeline.safety_checker
__UpperCAmelCase : Optional[Any] = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
__UpperCAmelCase : Any = pipeline.feature_extractor
else:
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Tuple = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(lowerCAmelCase__ )
print("""ONNX pipeline saved to""" , lowerCAmelCase__ )
del pipeline
del onnx_pipeline
__UpperCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase__ , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 254 | 0 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """A logger adapter that logs only on the main process unless asked otherwise."""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.'
            )
        main_process_only = kwargs.pop('main_process_only', True)
        in_order = kwargs.pop('in_order', False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    """Return a `MultiProcessAdapter` wrapped around a standard logger for `name`."""
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
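# Hedged usage sketch (mirrors the public accelerate.logging API that this
# module reimplements; treat it as illustrative):
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("printed on the main process only")  # main_process_only defaults to True
#     logger.info("printed by every rank, in order", main_process_only=False, in_order=True)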
| 365 |
'''simple docstring'''
from collections import deque
class Process:
    """A schedulable process with arrival time, burst time, and bookkeeping fields."""

    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of the finished process, or the last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """A multi-level feedback queue scheduler."""

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slices of the queues to which the round-robin algorithm is applied
        self.time_slices = time_slices
        # unfinished processes wait in this ready queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished processes go into this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """Return the names of finished processes in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """Return the waiting time of each process in `queue`."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """Return the turnaround time of each process in `queue`."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """Return the completion (stop) time of each process in `queue`."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """Return the remaining burst time of each process in `queue`."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """Add the time elapsed since the process last stopped to its waiting time."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> deque[Process]:
'''simple docstring'''
A: deque[Process] = deque() # sequence deque of finished process
while len(SCREAMING_SNAKE_CASE_ ) != 0:
A: Optional[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(SCREAMING_SNAKE_CASE_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
A: Any = 0
# set the process's turnaround time because it is finished
A: int = self.current_time - cp.arrival_time
# set the completion time
A: List[str] = self.current_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE_ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int ) -> tuple[deque[Process], deque[Process]]:
'''simple docstring'''
A: deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(SCREAMING_SNAKE_CASE_ ) ):
A: Dict = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(SCREAMING_SNAKE_CASE_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
A: Optional[Any] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(SCREAMING_SNAKE_CASE_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
A: int = 0
# set the finish time
A: Union[str, Any] = self.current_time
# update the process' turnaround time because it is finished
A: Tuple = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE_ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _snake_case ( self : Optional[Any] ) -> deque[Process]:
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
A , A: Optional[Any] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
UpperCamelCase = Process('''P1''', 0, 53)
UpperCamelCase = Process('''P2''', 0, 17)
UpperCamelCase = Process('''P3''', 0, 68)
UpperCamelCase = Process('''P4''', 0, 24)
UpperCamelCase = 3
UpperCamelCase = [17, 25]
UpperCamelCase = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
UpperCamelCase = Process('''P1''', 0, 53)
UpperCamelCase = Process('''P2''', 0, 17)
UpperCamelCase = Process('''P3''', 0, 68)
UpperCamelCase = Process('''P4''', 0, 24)
UpperCamelCase = 3
UpperCamelCase = [17, 25]
UpperCamelCase = deque([Pa, Pa, Pa, Pa])
UpperCamelCase = MLFQ(number_of_queues, time_slices, queue, 0)
UpperCamelCase = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
f'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
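    # Worked result for the workload above, verified by stepping through the schedule:
    # Q1 (RR, slice 17) completes P2; Q2 (RR, slice 25) completes P4; FCFS then runs
    # P1 to completion and finally P3.
    assert mlfq.calculate_sequence_of_finish_queue() == ["P2", "P4", "P1", "P3"]
    assert MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4]) == [83, 17, 94, 101]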
| 334 | 0 |
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the closest prime to `factor * value`, searching upward by default
    (pass desc=True to search downward); if the starting point is itself prime,
    recurse from the next integer so a strictly different prime is returned."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
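# Quick sanity checks for the helpers above (not part of the original file):
if __name__ == "__main__":
    assert is_prime(13) and not is_prime(15)
    assert next_prime(14) == 17  # 15 and 16 are composite
    assert next_prime(18, desc=True) == 17  # searching downward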
| 29 |
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak the fairseq model's weights into the Transformers BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
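# Example invocation (hedged: the script filename is an assumption; `bart.large.*` names
# download via torch.hub, while a local `model.pt` path loads the xsum checkpoint):
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large.cnn /tmp/bart-large-cnn \
#       --hf_config facebook/bart-large-cnn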
| 119 | 0 |
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    """Down block of resnets interleaved with cross-attention transformers."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    """Down block of resnets only, optionally followed by a downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    """Up block of resnets and cross-attention transformers, consuming skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    """Up block of resnets only, consuming skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    """Middle block: a resnet, then alternating cross-attention transformers and resnets."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
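# A minimal wiring sketch (hypothetical shapes, not part of the original file): a UNet stacks
# the down blocks and keeps every entry of their `output_states` tuples as skip connections;
# the up blocks later receive those entries as `res_hidden_states_tuple` and concatenate each
# skip with the upsampled activations channel-wise (NHWC layout, hence axis=-1).
#
#   down = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)
#   x, temb = jnp.zeros((1, 16, 16, 32)), jnp.zeros((1, 1280))
#   params = down.init(jax.random.PRNGKey(0), x, temb)
#   hidden, skips = down.apply(params, x, temb)   # skips feed the matching up block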
| 363 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/vocab.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def lowerCAmelCase ( self : str )-> Any:
snake_case = 0
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig()
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , __snake_case ) )
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Optional[int] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__snake_case )
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
            # create empty sample processor
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write("""{}""" )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
snake_case = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
snake_case = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case , use_fast=__snake_case )
snake_case = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoProcessor.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Any )-> Tuple:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local classes.
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str )-> Union[str, Any]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def lowerCAmelCase ( self : Any )-> List[str]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Tuple:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor""" ) , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : Any )-> Optional[Any]:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor-org""" ) , push_to_hub=__snake_case , use_auth_token=self._token , organization="""valid_org""" , )
snake_case = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : List[str] )-> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
snake_case = Repository(__snake_case , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) ) as f:
snake_case = json.load(__snake_case )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_processing.py""" ) ) )
repo.push_to_hub()
snake_case = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
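# A minimal sketch of the registration pattern these tests exercise, using the toy
# fixture classes imported at the top of the file (the save path is hypothetical):
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   AutoProcessor.register(CustomConfig, CustomProcessor)
#   processor = AutoProcessor.from_pretrained("<path to a saved CustomProcessor>")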
| 3 | 0 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def lowercase ( lowerCAmelCase__ : Dataset , lowerCAmelCase__ : Dict[str, str] ) -> Dict:
__a = args.log_outputs
__a = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
__a = load_metric('''wer''' )
__a = load_metric('''cer''' )
# compute metrics
__a = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
__a = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
__a = f'''WER: {wer_result}\nCER: {cer_result}'''
print(lowerCAmelCase__ )
with open(f'''{dataset_id}_eval_results.txt''' , '''w''' ) as f:
f.write(lowerCAmelCase__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__a = f'''log_{dataset_id}_predictions.txt'''
__a = f'''log_{dataset_id}_targets.txt'''
with open(lowerCAmelCase__ , '''w''' ) as p, open(lowerCAmelCase__ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int ):
p.write(f'''{i}''' + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(f'''{i}''' + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCAmelCase__ , with_indices=lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : str ) -> str:
__a = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__a = re.sub(lowerCAmelCase__ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
__a = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
__a = ''' '''.join(text.split(lowerCAmelCase__ ) )
return text
def lowercase ( lowerCAmelCase__ : int ) -> Any:
# load dataset
__a = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCAmelCase__ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__a = AutoFeatureExtractor.from_pretrained(args.model_id )
__a = feature_extractor.sampling_rate
# resample audio
__a = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCAmelCase__ ) )
# load eval pipeline
if args.device is None:
__a = 0 if torch.cuda.is_available() else -1
__a = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCAmelCase__ : Any ):
__a = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__a = prediction['''text''']
__a = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
__a = dataset.map(lowerCAmelCase__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
lowercase_ = parser.parse_args()
main(args)
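# Example invocation (hypothetical model/dataset ids):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs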
| 45 |
"""simple docstring"""
lowerCAmelCase__ = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def snake_case_ ( A_ : dict, A_ : int, A_ : int ):
'''simple docstring'''
_lowerCamelCase : List[str] = set()
# keep track of all the paths to be checked
_lowerCamelCase : str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_lowerCamelCase : str = queue.pop(0 )
# get the last node from the path
_lowerCamelCase : List[Any] = path[-1]
if node not in explored:
_lowerCamelCase : Union[str, Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_lowerCamelCase : Union[str, Any] = list(A_ )
new_path.append(A_ )
queue.append(A_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(A_ )
# in case there's no path between the 2 nodes
return []
def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ):
'''simple docstring'''
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_lowerCamelCase : Optional[int] = [start]
_lowerCamelCase : int = set(A_ )
# Keep tab on distances from `start` node.
_lowerCamelCase : int = {start: 0, target: -1}
while queue:
_lowerCamelCase : Optional[Any] = queue.pop(0 )
if node == target:
_lowerCamelCase : Any = (
dist[node] if dist[target] == -1 else min(dist[target], dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(A_ )
queue.append(A_ )
_lowerCamelCase : Any = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
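    # Extra sanity checks (not in the original file); the values match the comments above.
    assert bfs_shortest_path(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]
    assert bfs_shortest_path_distance(demo_graph, "G", "D") == 4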
| 72 | 0 |
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )
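    # A minimal composition sketch (hypothetical defaults; not in the original file):
    #   config = InstructBlipConfig.from_vision_qformer_text_configs(
    #       InstructBlipVisionConfig(), InstructBlipQFormerConfig(), CONFIG_MAPPING["opt"]()
    #   )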
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 240 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = IFPipeline
SCREAMING_SNAKE_CASE : Optional[int] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
SCREAMING_SNAKE_CASE : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE : str = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCamelCase__ ( self : Tuple ) -> Optional[Any]:
return self._get_dummy_components()
def lowerCamelCase__ ( self : Optional[Any] , snake_case : Optional[int] , snake_case : List[Any]=0 ) -> Optional[Any]:
if str(snake_case ).startswith('''mps''' ):
__UpperCAmelCase : Optional[Any] = torch.manual_seed(snake_case )
else:
__UpperCAmelCase : Dict = torch.Generator(device=snake_case ).manual_seed(snake_case )
__UpperCAmelCase : str = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCamelCase__ ( self : Any ) -> List[str]:
self._test_save_load_local()
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
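# The slow tests below keep both IF stages on one GPU by combining sequential CPU offload
# with AttnAddedKVProcessor; a minimal sketch of that setup (hypothetical variable name):
#   pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
#   pipe.enable_model_cpu_offload()
#   pipe.unet.set_attn_processor(AttnAddedKVProcessor())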
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : Dict ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Any ) -> Tuple:
# if
        __UpperCAmelCase : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.float16 )
        __UpperCAmelCase : List[str] = IFSuperResolutionPipeline.from_pretrained(
            '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.float16 , text_encoder=snake_case , tokenizer=snake_case )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
__UpperCAmelCase , __UpperCAmelCase : Tuple = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__UpperCAmelCase : Any = None
__UpperCAmelCase : Optional[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case , snake_case , snake_case , snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__UpperCAmelCase : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
__UpperCAmelCase : int = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case , snake_case , snake_case , snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__UpperCAmelCase : List[str] = IFInpaintingPipeline(**pipe_a.components )
__UpperCAmelCase : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case , snake_case , snake_case , snake_case )
def lowerCamelCase__ ( self : List[str] , snake_case : Any , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : str ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
__UpperCAmelCase : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase : List[str] = pipe_a(
prompt_embeds=snake_case , negative_prompt_embeds=snake_case , num_inference_steps=2 , generator=snake_case , output_type='''np''' , )
__UpperCAmelCase : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
__UpperCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
__UpperCAmelCase : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(snake_case , snake_case )
# pipeline 2
_start_torch_memory_measurement()
__UpperCAmelCase : int = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case )
__UpperCAmelCase : List[Any] = pipe_a(
prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , generator=snake_case , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
__UpperCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__UpperCAmelCase : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(snake_case , snake_case )
def lowerCamelCase__ ( self : Optional[int] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : str , snake_case : Dict ) -> str:
# pipeline 1
_start_torch_memory_measurement()
__UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case )
__UpperCAmelCase : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase : Dict = pipe_a(
prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , num_inference_steps=2 , generator=snake_case , output_type='''np''' , )
__UpperCAmelCase : int = output.images[0]
assert image.shape == (64, 64, 3)
__UpperCAmelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__UpperCAmelCase : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(snake_case , snake_case )
# pipeline 2
_start_torch_memory_measurement()
__UpperCAmelCase : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case )
__UpperCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case )
__UpperCAmelCase : List[str] = pipe_a(
prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , original_image=snake_case , generator=snake_case , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase : int = output.images[0]
assert image.shape == (256, 256, 3)
__UpperCAmelCase : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__UpperCAmelCase : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(snake_case , snake_case )
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats() | 240 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ :Optional[Any] = logging.get_logger(__name__)
a_ :List[Any] = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
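    # Worked example of the hidden_size formula above (values are the class
    # defaults, not an assumption): embed_dim=64 with len(depths)=4 stages gives
    # hidden_size = 64 * 2 ** (4 - 1) = 512, i.e. the channel width doubles at
    # each downsampling step and ends at 512 after the final stage.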
| 277 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
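    # Note: the "@-@", "@,@" and "@.@" tokens in the expected output above are
    # Moses-style placeholders TransfoXL inserts so hyphens and number
    # separators survive the detokenization round-trip that the second
    # assertion checks.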
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ), [1] )
self.assertEqual(tokenizer.decode([1] ), 'new1' )
| 277 | 1 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
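# Why the row max is subtracted above: np.exp() overflows for large logits,
# but shifting by the max is a no-op for the result. For example,
#   softmax(np.array([[1000.0, 1001.0]]))  ->  [[0.26894142, 0.73105858]]
# with no overflow, identical to softmax of [[-1.0, 0.0]].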
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 72 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
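# For example, betas_for_alpha_bar(1000) produces the "squaredcos_cap_v2"
# schedule consumed by the scheduler below: each beta_i is one minus the ratio
# of consecutive alpha-bar values, so the betas start near zero and grow
# toward the max_beta=0.999 cap at the final timesteps.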
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
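# Standalone sketch of the closed form that `add_noise` above implements:
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
# The schedule below is a toy stand-in, not a trained model's schedule.
def _demo_add_noise():
    alphas_cumprod = torch.linspace(0.999, 0.01, 10)  # toy alpha-bar schedule
    x0 = torch.randn(2, 3)
    eps = torch.randn_like(x0)
    t = torch.tensor([1, 7])
    sqrt_ab = alphas_cumprod[t].sqrt().unsqueeze(-1)
    sqrt_omb = (1.0 - alphas_cumprod[t]).sqrt().unsqueeze(-1)
    return sqrt_ab * x0 + sqrt_omb * eps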
| 72 | 1 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
a :Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
a :List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
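    # Example invocation (paths are placeholders, not files from this repo):
    #   python convert_ldm_original.py --checkpoint_path model.ckpt \
    #       --config_path ldm_config.yaml --output_path ./ldm-pipeline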
| 132 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_DESCRIPTION = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_KWARGS_DESCRIPTION = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __a (datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
    def _compute(self, predictions, references):
        """simple docstring"""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
| 132 | 1 |
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
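# Quick numerical checks of the density above:
#   gaussian(0.0)                    -> 1 / sqrt(2 * pi)       ~ 0.3989422804014327
#   gaussian(1.0, mu=1.0, sigma=2.0) -> 1 / (2 * sqrt(2 * pi)) ~ 0.19947114020071635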
| 351 |
def solution(limit: int = 1000000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f"""{solution() = }""")
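    # Brute-force cross-check for a tiny limit (O(limit^2), illustration only):
    # the result counts reduced proper fractions n/d with 0 < n < d <= limit.
    from math import gcd

    assert solution(8) == sum(
        1 for d in range(2, 9) for n in range(1, d) if gcd(n, d) == 1
    )  # both sides equal 21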
| 94 | 0 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
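# The rule the cases above exercise, in short: any pipeline component that
# ships a torch ".bin" weight file must also ship a ".safetensors" counterpart.
# A minimal re-implementation sketch (not the library's actual code):
def _sketch_is_safetensors_compatible(filenames):
    from collections import defaultdict

    extensions_by_component = defaultdict(set)
    for filename in filenames:
        component, _, name = filename.partition("/")
        extensions_by_component[component].add(name.rsplit(".", 1)[-1])
    return all(
        "safetensors" in exts
        for exts in extensions_by_component.values()
        if "bin" in exts
    )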
| 104 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
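    # Usage note: with the _LazyModule registration above, importing e.g.
    # `OPTModel` from this package only triggers the heavy torch-backed module
    # import on first attribute access, not at package import time.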
| 263 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
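# Minimal composition sketch using the classes above (all default values):
#
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   text = OPTConfig()  # any text-model config from CONFIG_MAPPING works
#   config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
#
# After construction, the Q-Former's encoder_hidden_size is tied to the
# vision tower's hidden_size (1408 by default).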
| 352 |
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
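# For example, with the helper above:
#   twin_prime(3) -> 5    (3 and 5 are both prime)
#   twin_prime(4) -> -1   (4 is not prime, so no twin pair)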
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 2 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
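# For example: deleting one digit of 1253 yields {253, 153, 125, 123},
# so remove_digit(1253) returns 253 (the sign is discarded via abs()).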
if __name__ == "__main__":
__import__('doctest').testmod()
| 2 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase):
"""simple docstring"""
@property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class __snake_case ( unittest.TestCase):
"""simple docstring"""
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
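# Note: by default the helper above derives both attention masks directly
# from the pad token, i.e. `input_ids.ne(config.pad_token_id)`, so padded
# positions are excluded from attention in both encoder and decoder.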
class MaMaaaModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,encoder_layerdrop=self.encoder_layerdrop ,decoder_layerdrop=self.decoder_layerdrop ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,)
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fp16 = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")
    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 334 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
_UpperCamelCase : Dict = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
_UpperCamelCase : Optional[int] = "document_qa"
_UpperCamelCase : Any = AutoProcessor
_UpperCamelCase : Union[str, Any] = VisionEncoderDecoderModel
_UpperCamelCase : Union[str, Any] = ["image", "text"]
_UpperCamelCase : List[str] = ["text"]
def __init__( self , *a__ , **a__ ):
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*a__ , **a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[int] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
_lowerCAmelCase : Dict = task_prompt.replace("""{user_input}""" , a__ )
_lowerCAmelCase : str = self.pre_processor.tokenizer(
a__ , add_special_tokens=a__ , return_tensors="""pt""" ).input_ids
_lowerCAmelCase : Dict = self.pre_processor(a__ , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __A ( self , a__ ):
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a__ , ).sequences
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = self.pre_processor.batch_decode(a__ )[0]
_lowerCAmelCase : int = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
_lowerCAmelCase : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
_lowerCAmelCase : List[str] = re.sub(r"""<.*?>""" , """""" , a__ , count=1 ).strip() # remove first task start token
_lowerCAmelCase : List[str] = self.pre_processor.tokenajson(a__ )
return sequence["answer"]
| 44 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCamelCase = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
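# Effect of the lazy module above, sketched: importing the package stays cheap,
# and the torch-backed classes are only resolved on first attribute access, e.g.
# `from transformers import CpmAntModel` triggers the real submodule import then.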
| 154 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
lowerCAmelCase__ : Dict = StableDiffusionLDMaDPipeline
lowerCAmelCase__ : List[Any] = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self ) -> str:
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
A__ = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=__UpperCAmelCase ,set_alpha_to_one=__UpperCAmelCase ,)
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=6 ,out_channels=6 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
A__ = CLIPTextModel(__UpperCAmelCase )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ) -> Dict:
if str(__UpperCAmelCase ).startswith('mps' ):
A__ = torch.manual_seed(__UpperCAmelCase )
else:
A__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def snake_case__ ( self ) -> str:
A__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = StableDiffusionLDMaDPipeline(**__UpperCAmelCase )
A__ = ldmad_pipe.to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A__ = self.get_dummy_inputs(__UpperCAmelCase )
A__ = ldmad_pipe(**__UpperCAmelCase )
A__ , A__ = output.rgb, output.depth
A__ = rgb[0, -3:, -3:, -1]
A__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
A__ = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
A__ = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def snake_case__ ( self ) -> List[str]:
A__ = self.get_dummy_components()
A__ = StableDiffusionLDMaDPipeline(**__UpperCAmelCase )
A__ = ldmad_pipe.to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A__ = self.get_dummy_inputs(__UpperCAmelCase )
A__ = 3 * [inputs['prompt']]
# forward
A__ = ldmad_pipe(**__UpperCAmelCase )
A__ , A__ = output.rgb, output.depth
A__ = rgb_slice_a[0, -3:, -3:, -1]
A__ = depth_slice_a[0, -3:, -1]
A__ = self.get_dummy_inputs(__UpperCAmelCase )
A__ = 3 * [inputs.pop('prompt' )]
A__ = ldmad_pipe.tokenizer(
__UpperCAmelCase ,padding='max_length' ,max_length=ldmad_pipe.tokenizer.model_max_length ,truncation=__UpperCAmelCase ,return_tensors='pt' ,)
A__ = text_inputs['input_ids'].to(__UpperCAmelCase )
A__ = ldmad_pipe.text_encoder(__UpperCAmelCase )[0]
A__ = prompt_embeds
# forward
A__ = ldmad_pipe(**__UpperCAmelCase )
A__ , A__ = output.rgb, output.depth
        A__ = rgb_slice_b[0, -3:, -3:, -1]
        A__ = depth_slice_b[0, -3:, -1]
        assert np.abs(rgb_slice_a.flatten() - rgb_slice_b.flatten() ).max() < 1e-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_b.flatten() ).max() < 1e-4
def snake_case__ ( self ) -> int:
A__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
A__ = StableDiffusionLDMaDPipeline(**__UpperCAmelCase )
A__ = ldmad_pipe.to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A__ = self.get_dummy_inputs(__UpperCAmelCase )
A__ = 'french fries'
A__ = ldmad_pipe(**__UpperCAmelCase ,negative_prompt=__UpperCAmelCase )
A__ , A__ = output.rgb, output.depth
A__ = rgb[0, -3:, -3:, -1]
A__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
A__ = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
A__ = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase="cpu" ,__UpperCAmelCase=torch.floataa ,__UpperCAmelCase=0 ) -> Optional[int]:
A__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
A__ = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
A__ = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase ,dtype=__UpperCAmelCase )
A__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def snake_case__ ( self ) -> Optional[Any]:
A__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
A__ = ldmad_pipe.to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A__ = self.get_inputs(__UpperCAmelCase )
A__ = ldmad_pipe(**__UpperCAmelCase )
A__ , A__ = output.rgb, output.depth
A__ = rgb[0, -3:, -3:, -1].flatten()
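        # note: the depth slice below indexes the rgb tensor (not depth); the
        # recorded expected values match exactly this indexing, so it is kept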
A__ = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12)
A__ = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
A__ = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase="cpu" ,__UpperCAmelCase=torch.floataa ,__UpperCAmelCase=0 ) -> int:
A__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
A__ = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
A__ = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase ,dtype=__UpperCAmelCase )
A__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def snake_case__ ( self ) -> str:
A__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A__ = self.get_inputs(__UpperCAmelCase )
A__ = ldmad_pipe(**__UpperCAmelCase )
A__ , A__ = output.rgb, output.depth
A__ = 0.4_9_5_5_8_6
A__ = 0.3_3_7_9_5_5_1_5
A__ = 1_1_2.4_8_5_1_8
A__ = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def snake_case__ ( self ) -> Optional[int]:
A__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A__ = self.get_inputs(__UpperCAmelCase )
A__ = ldmad_pipe(**__UpperCAmelCase )
A__ , A__ = output.rgb, output.depth
A__ = 0.4_1_9_4_1_2_7
A__ = 0.3_5_3_7_5_5_8_6
A__ = 0.5_6_3_8_5_0_2
A__ = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 154 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase_ = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['''GLPNFeatureExtractor''']
UpperCamelCase_ = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309 |
def _SCREAMING_SNAKE_CASE ( a ) -> list:
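    # Not a true merge sort despite the call-site name below: repeatedly peel off
    # the current min and max, growing a sorted prefix and suffix until at most
    # one element remains in the middle.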
__A , __A : Optional[Any] = [], []
while len(a ) > 1:
__A , __A : Any = min(a ), max(a )
start.append(a )
end.append(a )
collection.remove(a )
collection.remove(a )
end.reverse()
return start + collection + end
if __name__ == "__main__":
UpperCAmelCase : int = input('''Enter numbers separated by a comma:\n''').strip()
UpperCAmelCase : Dict = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 280 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : int ):
'''simple docstring'''
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod
else:
        UpperCamelCase__ : List[Any] =binary_exponentiation(UpperCAmelCase , n // 2 , UpperCAmelCase )
return (b * b) % mod
# a prime number
_SCREAMING_SNAKE_CASE : Optional[int] = 7_0_1
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_0_0_0_0_0_0_0_0_0
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1_0
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
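# sanity sketch (assumes the function above is bound to `binary_exponentiation`,
# as its existing call sites suggest): it should agree with the builtin pow
assert binary_exponentiation(3, 200, 701) == pow(3, 200, 701)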
| 157 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
def _lowerCAmelCase ( UpperCAmelCase : bool , UpperCAmelCase : bool ):
'''simple docstring'''
def run_func(UpperCAmelCase : List[str] ):
@wraps(UpperCAmelCase )
def run_in_eager_mode(*UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Dict ):
return func(*UpperCAmelCase , **UpperCAmelCase )
@wraps(UpperCAmelCase )
@tf.function(experimental_compile=UpperCAmelCase )
def run_in_graph_mode(*UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Tuple ):
return func(*UpperCAmelCase , **UpperCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
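# Decoration sketch (the factory's def name is obfuscated above; its call sites
# below use `run_with_tf_optimizations`): graph mode wraps the closure in
# tf.function(experimental_compile=use_xla), eager mode calls it directly.
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)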
def _lowerCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
'''simple docstring'''
UpperCamelCase__ : Tuple =random.Random()
UpperCamelCase__ : List[str] =[rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(UpperCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = "TensorFlow"
@property
def _lowerCAmelCase ( self : int ):
return tf.__version__
def _lowerCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
# initialize GPU on separate process
UpperCamelCase__ : Optional[int] =self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCamelCase__ : str =self._prepare_inference_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_speed(_inference )
def _lowerCAmelCase ( self : str , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
UpperCamelCase__ : List[str] =self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCamelCase__ : int =self._prepare_train_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_speed(_train )
def _lowerCAmelCase ( self : Any , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCamelCase__ : Optional[Any] =self._prepare_inference_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_memory(_inference )
def _lowerCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase_ )
UpperCamelCase__ : Tuple =self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
UpperCamelCase__ : List[Any] =self._prepare_train_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_memory(_train )
def _lowerCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
UpperCamelCase__ : Optional[Any] =self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
UpperCamelCase__ : Dict =(
hasattr(lowercase_ , '''architectures''' )
and isinstance(config.architectures , lowercase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCamelCase__ : Dict ='''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCamelCase__ : List[str] =__import__('''transformers''' , fromlist=[model_class] )
UpperCamelCase__ : Optional[int] =getattr(lowercase_ , lowercase_ )
UpperCamelCase__ : Optional[int] =model_cls(lowercase_ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
UpperCamelCase__ : Any =TF_MODEL_MAPPING[config.__class__](lowercase_ )
# encoder-decoder has vocab size saved differently
UpperCamelCase__ : Optional[int] =config.vocab_size if hasattr(lowercase_ , '''vocab_size''' ) else config.encoder.vocab_size
UpperCamelCase__ : List[Any] =random_input_ids(lowercase_ , lowercase_ , lowercase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowercase_ , decoder_input_ids=lowercase_ , training=lowercase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowercase_ , training=lowercase_ )
UpperCamelCase__ : Dict =encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _lowerCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
UpperCamelCase__ : List[str] =self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
UpperCamelCase__ : Optional[Any] =(
hasattr(lowercase_ , '''architectures''' )
and isinstance(config.architectures , lowercase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCamelCase__ : Tuple ='''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCamelCase__ : List[Any] =__import__('''transformers''' , fromlist=[model_class] )
UpperCamelCase__ : Dict =getattr(lowercase_ , lowercase_ )
UpperCamelCase__ : Tuple =model_cls(lowercase_ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
UpperCamelCase__ : Optional[int] =TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowercase_ )
# encoder-decoder has vocab size saved differently
UpperCamelCase__ : str =config.vocab_size if hasattr(lowercase_ , '''vocab_size''' ) else config.encoder.vocab_size
UpperCamelCase__ : Union[str, Any] =random_input_ids(lowercase_ , lowercase_ , lowercase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCamelCase__ : Optional[Any] =model(lowercase_ , decoder_input_ids=lowercase_ , labels=lowercase_ , training=lowercase_ )[0]
UpperCamelCase__ : Dict =tf.gradients(lowercase_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCamelCase__ : Dict =model(lowercase_ , labels=lowercase_ , training=lowercase_ )[0]
UpperCamelCase__ : List[str] =tf.gradients(lowercase_ , model.trainable_variables )
return gradients
UpperCamelCase__ : List[Any] =encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _lowerCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(lowercase_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCamelCase__ : int =timeit.repeat(
lowercase_ , repeat=self.args.repeat , number=10 , )
return min(lowercase_ ) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def _lowerCAmelCase ( self : Dict , lowercase_ : Callable[[], None] ):
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
UpperCamelCase__ : Tuple =start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
UpperCamelCase__ : List[str] ='''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
UpperCamelCase__ : Optional[Any] =nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCamelCase__ : Dict =nvml.nvmlDeviceGetMemoryInfo(lowercase_ )
UpperCamelCase__ : str =meminfo.used
UpperCamelCase__ : int =Memory(lowercase_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
UpperCamelCase__ : Union[str, Any] =None
else:
UpperCamelCase__ : Optional[int] =measure_peak_memory_cpu(lowercase_ )
UpperCamelCase__ : Dict =Memory(lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCamelCase__ : Tuple =stop_memory_tracing(lowercase_ )
if memory is None:
UpperCamelCase__ : List[Any] =summary.total
else:
UpperCamelCase__ : List[Any] =None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 157 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : Tuple = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase_ : Optional[int] = {
'''bert-base-uncased''': 5_12,
'''bert-large-uncased''': 5_12,
'''bert-base-cased''': 5_12,
'''bert-large-cased''': 5_12,
'''bert-base-multilingual-uncased''': 5_12,
'''bert-base-multilingual-cased''': 5_12,
'''bert-base-chinese''': 5_12,
'''bert-base-german-cased''': 5_12,
'''bert-large-uncased-whole-word-masking''': 5_12,
'''bert-large-cased-whole-word-masking''': 5_12,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-base-cased-finetuned-mrpc''': 5_12,
'''bert-base-german-dbmdz-cased''': 5_12,
'''bert-base-german-dbmdz-uncased''': 5_12,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_12,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12,
'''wietsedv/bert-base-dutch-cased''': 5_12,
}
UpperCAmelCase_ : List[str] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Tuple = VOCAB_FILES_NAMES
snake_case__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : List[str] = BertTokenizer
def __init__( self : Optional[Any] , __lowerCamelCase : str=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Tuple="[UNK]" , __lowerCamelCase : Dict="[SEP]" , __lowerCamelCase : Any="[PAD]" , __lowerCamelCase : str="[CLS]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Union[str, Any] , ):
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
UpperCamelCase :Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __lowerCamelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __lowerCamelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __lowerCamelCase ) != tokenize_chinese_chars
):
UpperCamelCase :Union[str, Any] = getattr(__lowerCamelCase , normalizer_state.pop("""type""" ) )
UpperCamelCase :Optional[int] = do_lower_case
UpperCamelCase :Tuple = strip_accents
UpperCamelCase :str = tokenize_chinese_chars
UpperCamelCase :str = normalizer_class(**__lowerCamelCase )
UpperCamelCase :Optional[int] = do_lower_case
def _A ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any=None ):
UpperCamelCase :Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
return output
def _A ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
UpperCamelCase :Optional[int] = [self.sep_token_id]
UpperCamelCase :Optional[int] = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def _A ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
UpperCamelCase :Dict = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
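# Usage sketch (the class above is the fast BERT tokenizer, exported upstream as
# BertTokenizerFast):
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   tok("first segment", "second segment")["token_type_ids"]  # zeros then ones, per the method above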
| 38 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : str , A_ : TransformeraDModel , A_ : AutoencoderKL , A_ : KarrasDiffusionSchedulers , A_ : Optional[Dict[int, str]] = None , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(transformer=A_ , vae=A_ , scheduler=A_ )
# create a imagenet -> id dictionary for easier use
lowerCamelCase_ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(',' ):
lowerCamelCase_ = int(A_ )
lowerCamelCase_ = dict(sorted(self.labels.items() ) )
def a__ ( self : Optional[int] , A_ : Union[str, List[str]] ) -> List[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
lowerCamelCase_ = list(A_ )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Any , A_ : List[int] , A_ : float = 4.0 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : int = 50 , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
lowerCamelCase_ = len(A_ )
lowerCamelCase_ = self.transformer.config.sample_size
lowerCamelCase_ = self.transformer.config.in_channels
lowerCamelCase_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=A_ , device=self.device , dtype=self.transformer.dtype , )
lowerCamelCase_ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowerCamelCase_ = torch.tensor(A_ , device=self.device ).reshape(-1 )
lowerCamelCase_ = torch.tensor([1000] * batch_size , device=self.device )
lowerCamelCase_ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowerCamelCase_ = latent_model_input[: len(A_ ) // 2]
lowerCamelCase_ = torch.cat([half, half] , dim=0 )
lowerCamelCase_ = self.scheduler.scale_model_input(A_ , A_ )
lowerCamelCase_ = t
if not torch.is_tensor(A_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowerCamelCase_ = latent_model_input.device.type == 'mps'
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.floataa if is_mps else torch.floataa
else:
lowerCamelCase_ = torch.intaa if is_mps else torch.intaa
lowerCamelCase_ = torch.tensor([timesteps] , dtype=A_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowerCamelCase_ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCamelCase_ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowerCamelCase_ = self.transformer(
A_ , timestep=A_ , class_labels=A_ ).sample
# perform guidance
if guidance_scale > 1:
lowerCamelCase_ , lowerCamelCase_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowerCamelCase_ , lowerCamelCase_ = torch.split(A_ , len(A_ ) // 2 , dim=0 )
lowerCamelCase_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowerCamelCase_ = torch.cat([half_eps, half_eps] , dim=0 )
lowerCamelCase_ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowerCamelCase_ , lowerCamelCase_ = torch.split(A_ , A_ , dim=1 )
else:
lowerCamelCase_ = noise_pred
# compute previous image: x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(A_ , A_ , A_ ).prev_sample
if guidance_scale > 1:
lowerCamelCase_ , lowerCamelCase_ = latent_model_input.chunk(2 , dim=0 )
else:
lowerCamelCase_ = latent_model_input
lowerCamelCase_ = 1 / self.vae.config.scaling_factor * latents
lowerCamelCase_ = self.vae.decode(A_ ).sample
lowerCamelCase_ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(A_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=A_ )
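# Usage sketch (this pipeline matches diffusers' DiTPipeline; checkpoint and
# method names are assumed from upstream, not from this file):
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   ids = pipe.get_label_ids(["white shark"])   # the label-lookup method above
#   image = pipe(class_labels=ids, num_inference_steps=25).images[0]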
| 204 | 0 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase = TypeVar("""T""")
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
return (position - 1) // 2
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
return (2 * position) + 1
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
return (2 * position) + 2
class lowerCAmelCase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
snake_case_ = []
snake_case_ = {}
snake_case_ = 0
def __len__( self ):
return self.elements
def __repr__( self ):
return str(self.heap )
def UpperCamelCase__ ( self ):
# Check if the priority queue is empty
return self.elements == 0
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
snake_case_ = self.elements
self.elements += 1
self._bubble_up(_UpperCAmelCase )
def UpperCamelCase__ ( self ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
snake_case_ , snake_case_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
snake_case_ , snake_case_ = self.heap[0]
self._bubble_down(_UpperCAmelCase )
return elem
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
# Update the weight of the given key
snake_case_ = self.position_map[elem]
snake_case_ = (elem, weight)
if position > 0:
snake_case_ = get_parent_position(_UpperCAmelCase )
snake_case_ , snake_case_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_UpperCAmelCase )
else:
self._bubble_down(_UpperCAmelCase )
else:
self._bubble_down(_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
snake_case_ = self.position_map[elem]
if curr_pos == 0:
return None
snake_case_ = get_parent_position(_UpperCAmelCase )
snake_case_ , snake_case_ = self.heap[curr_pos]
snake_case_ , snake_case_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_up(_UpperCAmelCase )
return None
def UpperCamelCase__ ( self , _UpperCAmelCase ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
snake_case_ = self.position_map[elem]
snake_case_ , snake_case_ = self.heap[curr_pos]
snake_case_ = get_child_left_position(_UpperCAmelCase )
snake_case_ = get_child_right_position(_UpperCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
snake_case_ , snake_case_ = self.heap[child_left_position]
snake_case_ , snake_case_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_down(_UpperCAmelCase )
if child_left_position < self.elements:
snake_case_ , snake_case_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_down(_UpperCAmelCase )
else:
return None
if child_right_position < self.elements:
snake_case_ , snake_case_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase )
return self._bubble_down(_UpperCAmelCase )
return None
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
# Swap the nodes at the given positions
snake_case_ = self.heap[nodea_pos][0]
snake_case_ = self.heap[nodea_pos][0]
snake_case_ , snake_case_ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
snake_case_ = nodea_pos
snake_case_ = nodea_pos
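# Behavioral sketch of the queue above (its name, MinPriorityQueue, comes from
# the call site in the Prim routine below):
#   q = MinPriorityQueue[str]()
#   q.push("a", 5); q.push("b", 2); q.update_key("a", 1)
#   q.extract_min()  # -> "a"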
class lowerCAmelCase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
snake_case_ = {}
snake_case_ = 0
def __repr__( self ):
return str(self.connections )
def __len__( self ):
return self.nodes
def UpperCamelCase__ ( self , _UpperCAmelCase ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
snake_case_ = {}
self.nodes += 1
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Add an edge between 2 nodes in the graph
self.add_node(_UpperCAmelCase )
self.add_node(_UpperCAmelCase )
snake_case_ = weight
snake_case_ = weight
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , )-> tuple[dict[T, int], dict[T, T | None]]:
"""simple docstring"""
snake_case_ = {node: maxsize for node in graph.connections}
snake_case_ = {node: None for node in graph.connections}
snake_case_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if priority_queue.is_empty():
return dist, parent
# initialization
snake_case_ = priority_queue.extract_min()
snake_case_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
snake_case_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE , dist[neighbour] )
snake_case_ = node
# running prim's algorithm
while not priority_queue.is_empty():
snake_case_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
snake_case_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE , dist[neighbour] )
snake_case_ = node
return dist, parent | 360 |
import numpy as np
import datasets
UpperCAmelCase = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
UpperCAmelCase = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
UpperCAmelCase = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
# convert to numpy arrays
snake_case_ = np.array(_UpperCAmelCase )
snake_case_ = np.array(_UpperCAmelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
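        # squared Mahalanobis distance per row: d^2(x) = (x - mu)^T Sigma^{-1} (x - mu),
        # realized below as the diagonal of (X - mu) @ Sigma^{-1} @ (X - mu)^T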
snake_case_ = X - np.mean(_UpperCAmelCase )
snake_case_ = np.cov(reference_distribution.T )
try:
snake_case_ = np.linalg.inv(_UpperCAmelCase )
except np.linalg.LinAlgError:
snake_case_ = np.linalg.pinv(_UpperCAmelCase )
snake_case_ = np.dot(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = np.dot(_UpperCAmelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 267 | 0 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
__lowercase : List[Any] = logging.get_logger(__name__)
class __UpperCamelCase :
A_ = None
@experimental
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return _map_with_joblib(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
__a : Optional[int] = num_proc if num_proc <= len(_SCREAMING_SNAKE_CASE ) else len(_SCREAMING_SNAKE_CASE )
    __a : Optional[Any] = [] # We organize the splits ourselves (contiguous splits)
for index in range(_SCREAMING_SNAKE_CASE ):
__a : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) // num_proc
__a : Optional[int] = len(_SCREAMING_SNAKE_CASE ) % num_proc
__a : Optional[int] = div * index + min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Optional[int] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(_SCREAMING_SNAKE_CASE ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F"""Error dividing inputs iterable among processes. """
F"""Total number of objects {len(_SCREAMING_SNAKE_CASE )}, """
F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
F"""Spawning {num_proc} processes for {len(_SCREAMING_SNAKE_CASE )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
__a , __a : Union[str, Any] = None, None
if not disable_tqdm:
__a , __a : Tuple = (RLock(),), tqdm.set_lock
with Pool(_SCREAMING_SNAKE_CASE , initargs=_SCREAMING_SNAKE_CASE , initializer=_SCREAMING_SNAKE_CASE ) as pool:
__a : Union[str, Any] = pool.map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info(F"""Finished {num_proc} processes""" )
__a : Optional[int] = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"""Unpacked {len(_SCREAMING_SNAKE_CASE )} objects""" )
return mapped
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] ):
# progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
# and it requires monkey-patching joblib internal classes which is subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=_SCREAMING_SNAKE_CASE ):
return joblib.Parallel()(
joblib.delayed(_SCREAMING_SNAKE_CASE )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : Union[str, Any] = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
__a : Optional[int] = None
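# Usage sketch of the context manager above (its def name is hidden; upstream it
# is datasets.parallel.parallel_backend):
#   with parallel_backend("spark"):
#       mapped = dataset.map(fn, num_proc=4)  # dispatched through joblib's spark backend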
| 27 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = inspect.getfile(accelerate.test_utils )
__a : List[str] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
__a : Union[str, Any] = test_metrics
@require_cpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
print(f"""Found {torch.cuda.device_count()} devices.""" )
__a : List[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
| 27 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class __magic_name__ :
UpperCamelCase__ = None
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = True
UpperCamelCase__ = None
UpperCamelCase__ = 1
UpperCamelCase__ = None
UpperCamelCase__ = False
UpperCamelCase__ = None
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 362 | '''simple docstring'''
def lowerCamelCase ( ) -> int:
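    # Project Euler 40: concatenate 1, 2, 3, ... into the Champernowne digit
    # string and multiply the digits at positions 1, 10, 100, ..., 1_000_000.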
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    lowercase_ : int = """""".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
| 21 | 0 |
# Lint as: python3
import itertools
import os
import re
lowerCamelCase_ = re.compile(r'''([A-Z]+)([A-Z][a-z])''')
lowerCamelCase_ = re.compile(r'''([a-z\d])([A-Z])''')
lowerCamelCase_ = re.compile(r'''(?<!_)_(?!_)''')
lowerCamelCase_ = re.compile(r'''(_{2,})''')
lowerCamelCase_ = r'''^\w+(\.\w+)*$'''
lowerCamelCase_ = r'''<>:/\|?*'''
def __magic_name__ ( __a : str ):
'''simple docstring'''
UpperCamelCase__ = _uppercase_uppercase_re.sub(R"""\1_\2""" , __a )
UpperCamelCase__ = _lowercase_uppercase_re.sub(R"""\1_\2""" , __a )
return name.lower()
def __magic_name__ ( __a : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ = _single_underscore_re.split(__a )
UpperCamelCase__ = [_multiple_underscores_re.split(__a ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(__a ) if n != """""" )
def filename_prefix_for_name(name: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name: str, split: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
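# Example usage of the helpers above (names and paths are illustrative only):
if __name__ == "__main__":
    print(camelcase_to_snakecase("MyDataset"))  # -> my_dataset
    print(
        filenames_for_dataset_split(
            "/data", "MyDataset", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
        )
    )  # -> ['/data/my_dataset-train-00000-of-00002.arrow', '/data/my_dataset-train-00001-of-00002.arrow']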
| 244 |
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Recursive binary search over a sorted list; returns True if item is found."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
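# Non-interactive examples (the input list must already be sorted):
#   binary_search([1, 3, 5, 7, 9], 7)  # -> True
#   binary_search([1, 3, 5, 7, 9], 4)  # -> False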
| 244 | 1 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a TransfoXL TF checkpoint and/or pre-processed corpus to PyTorch format."""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
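# Illustrative invocation (script filename and paths are assumptions, not part
# of this file):
#   python convert_transfo_xl_checkpoint.py \
#       --pytorch_dump_folder_path ./transfo-xl-dump \
#       --transfo_xl_dataset_file ./corpus.pkl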
| 112 |
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    """Atbash cipher, one character at a time (65 + 90 = 155, 97 + 122 = 219)."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase A-Z
            output += chr(155 - extract)
        elif 97 <= extract <= 122:  # lowercase a-z
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    """Atbash cipher using a reversed-alphabet lookup table."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
def benchmark() -> None:
    """Benchmark both implementations over string.printable."""
    from timeit import timeit
    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 112 | 1 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision) -> None:
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 35 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
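# Quick check of the size mapping above (values follow from the branches):
#   decoder_config_from_checkpoint("medium").hidden_size  # -> 1536
#   decoder_config_from_checkpoint("medium").ffn_dim      # -> 6144 (4 * hidden_size)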
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
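# Illustrative invocation (script filename and paths are assumptions; loading
# the original checkpoint requires the `audiocraft` package):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small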
| 41 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    """simple docstring"""
    config_class = RobertaConfig
    base_model_prefix = "roberta"
    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    """simple docstring"""
    config_class = RobertaConfig
    base_model_prefix = "roberta"
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
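# Minimal instantiation sketch (assumes the sibling `modeling_highway_bert`
# module is importable; config values are illustrative):
#   config = RobertaConfig(num_labels=2)
#   model = DeeRobertaForSequenceClassification(config)
#   model.eval()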
| 43 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identity(self):
        pass
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass
    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    """simple docstring"""
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 43 | 1 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = (
    '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. '
    'Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
)
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """simple docstring"""
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
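# Usage sketch, mirroring the first example in the docstring above:
#   import datasets
#   f1_metric = datasets.load_metric("f1")
#   f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
#   # -> {'f1': 0.5}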
| 24 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
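# With this pattern, importing the package stays cheap: the heavy torch/tf/flax
# submodules listed in `_import_structure` are only imported the first time an
# attribute such as `OPTModel` is accessed on the lazy module, e.g.:
#   from transformers.models.opt import OPTConfig  # triggers only the config import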
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Minimax over a perfect binary tree whose leaves hold the scores."""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
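# Worked trace for the sample scores (height = 3, root is the maximizer):
#   leaves        : 90 23 | 6 33 | 21 65 | 123 34423
#   depth 2 (max) : 90      33     65      34423
#   depth 1 (min) : 33             65
#   depth 0 (max) : 65  -> printed as "Optimal value : 65"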
| 360 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Maximum sum of any contiguous subsequence (Kadane's algorithm)."""
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty')
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
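# Classic Kadane's-algorithm check: for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the best
# contiguous subsequence is [4, -1, 2, 1], so:
#   max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])  # -> 6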
| 282 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument(
        "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).", )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.", )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.", )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution.", )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.", )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.", )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.", )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O1", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ), )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it.")
            else:
                shutil.rmtree(args.dump_path)
        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")
        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")
    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
    distiller.train()
    logger.info("Let's go get some drinks.")
logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main() | 76 |
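# Illustrative invocation (paths are assumptions; the binarized data and token
# counts come from the accompanying preprocessing scripts):
#   python train.py --student_type distilbert --student_config distilbert.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_ce 0.5 --alpha_clm 0.0 \
#       --data_file binarized.pkl --token_counts counts.pkl --dump_path ./dump --force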
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    model_type = 'mvp'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 294 | 0 |
def count_set_bits(number: int) -> int:
    """Count set bits (population count) of a non-negative integer."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("""Input must be a non-negative integer""")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
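# Brian Kernighan's trick: `number &= number - 1` clears the lowest set bit, so
# the loop runs once per set bit rather than once per bit position.
#   count_set_bits(25)  # 25 == 0b11001 -> 3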
| 138 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For every vector in value_array, find the nearest vector in dataset (linear scan)."""
    if dataset.ndim != value_array.ndim:
        msg = (
            """Wrong input data's dimensions... """
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                """Wrong input data's shape... """
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("""Wrong shape""")
    if dataset.dtype != value_array.dtype:
        msg = (
            """Input data have different datatype... """
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
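# Usage sketch with toy data (values are illustrative):
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.0, 1.0]])
#   similarity_search(dataset, value_array)  # -> [[[0.0, 0.0], 1.0]]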
| 138 | 1 |
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'
def quote_of_the_day() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '''/today''').json()
def random_quotes() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '''/random''').json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 117 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
def _lowerCAmelCase (self :Any , _UpperCamelCase :int , _UpperCamelCase :List[str]=None )-> List[Any]:
__A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCAmelCase (self :List[str] , _UpperCamelCase :List[int] , _UpperCamelCase :Optional[List[int]] = None )-> List[int]:
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
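    # Illustrative sketch (assumption, not from the original source): for a pair
    # [CLS] A [SEP] B [SEP] the method above yields 0s for segment A and 1s for B:
    #   cls, sep = [101], [102]            # hypothetical BERT special-token ids
    #   ids_a, ids_b = [7592], [2088, 999] # hypothetical word-piece ids
    #   len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]
    #   # -> [0, 0, 0, 1, 1, 1]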
def _lowerCAmelCase (self :Any , _UpperCamelCase :str , _UpperCamelCase :Optional[str] = None )-> Tuple[str]:
__A = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
| 117 | 1 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
__SCREAMING_SNAKE_CASE : List[Any] = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''vinai/phobert-base''': 2_5_6,
'''vinai/phobert-large''': 2_5_6,
}
def lowerCAmelCase_( lowercase_ : str ) -> Tuple:
_lowerCamelCase = set()
_lowerCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCamelCase = char
_lowerCamelCase = set(lowercase_ )
return pairs
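# Illustrative sketch (assumption, not from the original source): for the symbol
# tuple ('l', 'o', 'w') the helper above returns the adjacent BPE bigrams
# {('l', 'o'), ('o', 'w')}, which the merge loop below ranks and collapses.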
class lowerCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowercase__ : Optional[Any] = VOCAB_FILES_NAMES
lowercase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , **lowerCamelCase__ , ):
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , )
_lowerCamelCase = vocab_file
_lowerCamelCase = merges_file
_lowerCamelCase = {}
_lowerCamelCase = 0
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
self.add_from_file(lowerCamelCase__ )
_lowerCamelCase = {v: k for k, v in self.encoder.items()}
with open(lowerCamelCase__ , encoding='''utf-8''' ) as merges_handle:
_lowerCamelCase = merges_handle.read().split('''\n''' )[:-1]
_lowerCamelCase = [tuple(merge.split()[:-1] ) for merge in merges]
_lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_lowerCamelCase = {}
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
_lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
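    # Illustrative sketch (assumption): PhoBERT follows the RoBERTa pair format
    # <s> A </s></s> B </s>, so with bos=0, eos=2 (assumed PhoBERT ids) and token
    # ids [10, 11] and [12], the method above returns [0, 10, 11, 2, 2, 12, 2].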
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case__ ( self ):
return len(self.encoder )
def snake_case__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case__ ( self , lowerCamelCase__ ):
if token in self.cache:
return self.cache[token]
_lowerCamelCase = tuple(lowerCamelCase__ )
_lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
_lowerCamelCase = get_pairs(lowerCamelCase__ )
if not pairs:
return token
while True:
_lowerCamelCase = min(lowerCamelCase__ , key=lambda lowerCamelCase__ : self.bpe_ranks.get(lowerCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCamelCase = bigram
_lowerCamelCase = []
_lowerCamelCase = 0
while i < len(lowerCamelCase__ ):
try:
_lowerCamelCase = word.index(lowerCamelCase__ , lowerCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCamelCase = j
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCamelCase = tuple(lowerCamelCase__ )
_lowerCamelCase = new_word
if len(lowerCamelCase__ ) == 1:
break
else:
_lowerCamelCase = get_pairs(lowerCamelCase__ )
_lowerCamelCase = '@@ '.join(lowerCamelCase__ )
_lowerCamelCase = word[:-4]
_lowerCamelCase = word
return word
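    # Illustrative walk-through (assumption): for token "low" with a learned merge
    # ("l", "o"), the loop above rewrites ('l', 'o', 'w</w>') -> ('lo', 'w</w>');
    # once no ranked bigram remains, the sub-words are joined with "@@ " and the
    # trailing "</w>" marker is stripped by word[:-4].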
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = []
_lowerCamelCase = re.findall(R'''\S+\n?''' , lowerCamelCase__ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCamelCase__ ).split(''' ''' ) ) )
return split_tokens
def snake_case__ ( self , lowerCamelCase__ ):
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def snake_case__ ( self , lowerCamelCase__ ):
return self.decoder.get(lowerCamelCase__ , self.unk_token )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = ' '.join(lowerCamelCase__ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.merges_file , lowerCamelCase__ )
return out_vocab_file, out_merge_file
def snake_case__ ( self , lowerCamelCase__ ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
try:
with open(lowerCamelCase__ , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(lowerCamelCase__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
_lowerCamelCase = f.readlines()
for lineTmp in lines:
_lowerCamelCase = lineTmp.strip()
_lowerCamelCase = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
_lowerCamelCase = line[:idx]
_lowerCamelCase = len(self.encoder )
| 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Dict = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['''BlenderbotSmallTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
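    # NOTE (illustrative): _LazyModule defers the heavy torch/tf/flax imports above
    # until attribute access, so e.g.
    #   from transformers.models.blenderbot_small import BlenderbotSmallConfig
    # works even when only some of the optional backends are installed.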
| 73 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __lowerCAmelCase ( a__ , a__=False ) -> int:
try:
__a = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__a = default
else:
# KEY is set, convert it to True or False.
try:
__a = strtobool(lowerCAmelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
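# Illustrative usage (assumption): with RUN_SLOW=yes exported,
#   parse_flag_from_env("RUN_SLOW", default=False)  # -> True
# distutils.util.strtobool accepts y/yes/t/true/on/1 and n/no/f/false/off/0.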
A : Tuple = parse_flag_from_env('RUN_SLOW', default=False)
def __lowerCAmelCase ( a__ ) -> Any:
return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> List[str]:
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> List[str]:
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> int:
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> List[str]:
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> Any:
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> Tuple:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> List[str]:
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> Optional[Any]:
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> List[str]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> List[Any]:
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> Dict:
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> Tuple:
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> List[Any]:
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> Any:
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> Optional[int]:
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__=None , a__=None ) -> str:
if test_case is None:
return partial(lowerCAmelCase__ , version=lowerCAmelCase__ )
return unittest.skipUnless(is_torch_version('''>=''' , lowerCAmelCase__ ) , F"""test requires torch version >= {version}""" )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> List[str]:
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> List[Any]:
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ )
def __lowerCAmelCase ( a__ ) -> str:
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ )
A : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __lowerCAmelCase ( a__ ) -> int:
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ )
class __A( unittest.TestCase ):
snake_case_ = True
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> int:
'''simple docstring'''
__a = tempfile.mkdtemp()
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Dict:
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(A__ )
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = mocks if isinstance(A__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __lowerCAmelCase ( a__ ) -> int:
__a = AcceleratorState()
__a = tensor[None].clone().to(state.device )
__a = gather(lowerCAmelCase__ ).cpu()
__a = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCAmelCase__ ):
return False
return True
class __A:
def __init__( self , _snake_case , _snake_case , _snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a = returncode
__a = stdout
__a = stderr
async def __lowerCAmelCase ( a__ , a__ ) -> Tuple:
while True:
__a = await stream.readline()
if line:
callback(lowerCAmelCase__ )
else:
break
async def __lowerCAmelCase ( a__ , a__=None , a__=None , a__=None , a__=False , a__=False ) -> Optional[Any]:
if echo:
print('''\nRunning: ''' , ''' '''.join(lowerCAmelCase__ ) )
__a = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCAmelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCAmelCase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__a = []
__a = []
def tee(a__ , a__ , a__ , a__="" ):
__a = line.decode('''utf-8''' ).rstrip()
sink.append(lowerCAmelCase__ )
if not quiet:
print(lowerCAmelCase__ , lowerCAmelCase__ , file=lowerCAmelCase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda a__ : tee(lowerCAmelCase__ , lowerCAmelCase__ , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda a__ : tee(lowerCAmelCase__ , lowerCAmelCase__ , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=lowerCAmelCase__ , )
return _RunOutput(await p.wait() , lowerCAmelCase__ , lowerCAmelCase__ )
def __lowerCAmelCase ( a__ , a__=None , a__=None , a__=180 , a__=False , a__=True ) -> Optional[Any]:
__a = asyncio.get_event_loop()
__a = loop.run_until_complete(
_stream_subprocess(lowerCAmelCase__ , env=lowerCAmelCase__ , stdin=lowerCAmelCase__ , timeout=lowerCAmelCase__ , quiet=lowerCAmelCase__ , echo=lowerCAmelCase__ ) )
__a = ''' '''.join(lowerCAmelCase__ )
if result.returncode > 0:
__a = '''\n'''.join(result.stderr )
raise RuntimeError(
F"""\'{cmd_str}\' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
return result
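# Illustrative usage (assumption): invoked elsewhere as
#   execute_subprocess_async(["python", "-c", "print('ok')"], env=os.environ.copy())
# it returns a _RunOutput and raises RuntimeError with the combined worker stderr
# whenever the child exits non-zero.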
class __A( SCREAMING_SNAKE_CASE__ ):
pass
def __lowerCAmelCase ( a__ , a__=False ) -> Dict:
try:
__a = subprocess.check_output(lowerCAmelCase__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCAmelCase__ , '''decode''' ):
__a = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"""Command `{' '.join(lowerCAmelCase__ )}` failed with the following error:\n\n{e.output.decode()}""" ) from e | 6 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : List[Any] =IFInpaintingPipeline
lowercase_ : Optional[int] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
lowercase_ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase_ : str =PipelineTesterMixin.required_optional_params - {'''latents'''}
def A__ ( self):
return self._get_dummy_components()
def A__ ( self ,A__ ,A__=0):
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__)
lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__)
lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def A__ ( self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def A__ ( self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''')
def A__ ( self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1)
def A__ ( self):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def A__ ( self):
self._test_save_load_local()
def A__ ( self):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 ,)
| 101 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCamelCase : int = """\
"""
UpperCamelCase : str = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
UpperCamelCase : Optional[int] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string'),
}) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 1_6 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Any=None):
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
a : Union[str, Any] = 'cuda'
else:
a : int = 'cuda' if torch.cuda.is_available() else 'cpu'
a : Optional[int] = AutoModelForCausalLM.from_pretrained(UpperCAmelCase_)
a : str = model.to(UpperCAmelCase_)
a : int = AutoTokenizer.from_pretrained(UpperCAmelCase_)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
a : Optional[Any] = list(tokenizer.special_tokens_map_extended.values())
# check that the model already has at least one special token defined
assert (
len(UpperCAmelCase_) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]})
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
a : Dict = model.config.max_length - 1
else:
a : Tuple = model.config.max_length
a : Any = tokenizer(
UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_tensors='pt' , return_attention_mask=UpperCAmelCase_ , ).to(UpperCAmelCase_)
a : Union[str, Any] = encodings['input_ids']
a : Optional[int] = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1) , 1)), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1) , 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
a : Tuple = []
a : int = CrossEntropyLoss(reduction='none')
for start_index in logging.tqdm(range(0 , len(UpperCAmelCase_) , UpperCAmelCase_)):
a : Union[str, Any] = min(start_index + batch_size , len(UpperCAmelCase_))
a : List[Any] = encoded_texts[start_index:end_index]
a : List[str] = attn_masks[start_index:end_index]
if add_start_token:
a : int = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(UpperCAmelCase_)
a : Tuple = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1)
a : str = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64).to(UpperCAmelCase_), attn_mask] , dim=1)
a : Optional[Any] = encoded_batch
with torch.no_grad():
a : Any = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_).logits
a : Tuple = out_logits[..., :-1, :].contiguous()
a : Dict = labels[..., 1:].contiguous()
a : Optional[int] = attn_mask[..., 1:].contiguous()
                a : Dict = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2) , UpperCAmelCase_) * shift_attention_mask_batch).sum(1)
/ shift_attention_mask_batch.sum(1))
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCAmelCase_)}
| 345 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : Union[str, Any] = SwinConfig()
a : Optional[int] = swin_name.split('_' )
a : Union[str, Any] = name_split[1]
a : Dict = int(name_split[4] )
a : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
a : Optional[Any] = 96
a : Any = (2, 2, 6, 2)
a : List[str] = (3, 6, 12, 24)
elif model_size == "small":
a : int = 96
a : List[str] = (2, 2, 18, 2)
a : int = (3, 6, 12, 24)
elif model_size == "base":
a : Tuple = 128
a : Optional[int] = (2, 2, 18, 2)
a : List[Any] = (4, 8, 16, 32)
else:
a : Dict = 192
a : str = (2, 2, 18, 2)
a : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
a : Any = 21_841
else:
a : str = 1_000
a : str = 'huggingface/label-files'
a : Optional[Any] = 'imagenet-1k-id2label.json'
a : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : int = idalabel
a : str = {v: k for k, v in idalabel.items()}
a : Dict = img_size
a : List[Any] = num_classes
a : str = embed_dim
a : Dict = depths
a : Union[str, Any] = num_heads
a : int = window_size
return config
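# Illustrative example (assumption): "swin_tiny_patch4_window7_224" splits into
# ['swin', 'tiny', 'patch4', 'window7', '224'], so the branches above produce
# img_size=224, window_size=7, embed_dim=96, depths=(2, 2, 6, 2) and
# num_heads=(3, 6, 12, 24).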
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a : Any = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
a : Optional[Any] = key.split('.' )
a : Dict = int(key_split[1] )
a : Optional[int] = int(key_split[3] )
a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : Optional[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : List[Any] = val[-dim:, :]
else:
a : Dict = val[
:dim
]
a : Union[str, Any] = val[
dim : dim * 2
]
a : Union[str, Any] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
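# Standalone sketch (illustrative, hypothetical shapes): the qkv slicing above in
# miniature:
#   import torch
#   dim = 4
#   fused = torch.randn(3 * dim, dim)  # stacked q/k/v projection weights
#   q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
#   assert q.shape == k.shape == v.shape == (dim, dim)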
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict ) -> List[str]:
"""simple docstring"""
a : Any = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
a : str = get_swin_config(snake_case )
a : Optional[int] = SwinForImageClassification(snake_case )
model.eval()
a : Union[str, Any] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
a : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
a : Union[str, Any] = image_processor(images=snake_case , return_tensors='pt' )
a : int = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 345 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
if "model" in sd.keys():
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )["""model"""]
# pop unnecessary weights
__lowerCamelCase = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(A__ )
__lowerCamelCase = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__lowerCamelCase = sd.pop(A__ )
__lowerCamelCase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowerCamelCase = sd[key]
# We split QKV in separate Q,K,V
__lowerCamelCase = key.replace(""".qkv_proj.""" , """.q_proj.""" )
__lowerCamelCase = key.replace(""".qkv_proj.""" , """.k_proj.""" )
__lowerCamelCase = key.replace(""".qkv_proj.""" , """.v_proj.""" )
__lowerCamelCase = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused QKV weight in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = torch.split(A__ , depth // 3 , dim=0 )
__lowerCamelCase = q
__lowerCamelCase = k
__lowerCamelCase = v
del sd[key]
return sd
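# Illustrative example (assumption): a fused key such as
#   "decoder.layers.0.self_attn.qkv_proj.weight"
# fans out above into ".q_proj.", ".k_proj." and ".v_proj." variants, each taking
# one depth // 3 slice of the original weight.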
@torch.no_grad()
def lowerCamelCase__ ( A__ : Tuple , A__ : List[Any] , A__ : str=None ):
'''simple docstring'''
__lowerCamelCase = load_checkpoint(A__ )
if config is not None:
__lowerCamelCase = OPTConfig.from_pretrained(A__ )
else:
__lowerCamelCase = OPTConfig()
__lowerCamelCase = OPTModel(A__ ).half().eval()
model.load_state_dict(A__ )
# Check results
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 12 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(A__ )
__lowerCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any] , A__ : Tuple , A__ : Optional[Any] ):
'''simple docstring'''
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCamelCase, __lowerCamelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
return eval_metric["accuracy"]
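# Illustrative note (assumption): with 4 processes and 10 eval samples, the final
# gathered batch repeats 2 samples; the truncation above caps predictions and
# references at len(eval_dataloader.dataset) so each sample is scored exactly once.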
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = args.model_name_or_path
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
    # We also need to keep track of the starting epoch so files are named properly
__lowerCamelCase = 0
__lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
__lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowerCamelCase = int(A__ ) + 1
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowerCamelCase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowerCamelCase = f'epoch_{epoch}'
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowerCamelCase = accuracy
__lowerCamelCase = lr_scheduler.get_lr()[0]
__lowerCamelCase = optimizer.param_groups[0]["""lr"""]
__lowerCamelCase = epoch
__lowerCamelCase = overall_step
accelerator.print(f'epoch {epoch}:' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(A__ , A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
    __lowerCamelCase = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing and resume support.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
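    # Hypothetical example of the per-epoch state file written above (values are
    # illustrative only):
    #   state_0.json -> {"accuracy": 0.84, "lr": 1.6e-05, "optimizer_lr": 1.6e-05,
    #                    "epoch": 0, "step": 57}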
| 12 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int ) -> Dict:
UpperCAmelCase : int = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
UpperCAmelCase : int = test_metrics
@require_cpu
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCAmelCase_ ( self : int ) -> Tuple:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
self.test_metrics.main()
@require_multi_gpu
def UpperCAmelCase_ ( self : int ) -> Tuple:
print(f"""Found {torch.cuda.device_count()} devices.""" )
UpperCAmelCase : Dict = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
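        # NOTE (illustrative): the command above expands to roughly
        #   torchrun --nproc_per_node=<n_gpus> .../test_metrics.py
        # run with OMP_NUM_THREADS=1 patched into the environment for the launch.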
| 280 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
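# Illustrative expectation (assumption about the fixtures): the text files used
# below contain four lines, so each read yields
#   Dataset({features: ['text'], num_rows: 4})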
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : int = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : Tuple = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
UpperCAmelCase : Optional[int] = features.copy() if features else default_expected_features
UpperCAmelCase : int = (
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Union[str, Any] = TextDatasetReader(UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = tmp_path / 'cache'
UpperCAmelCase : Tuple = {'text': 'string'}
UpperCAmelCase : List[str] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , split=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = text_path
elif issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[Any] = [text_path]
UpperCAmelCase : List[Any] = tmp_path / 'cache'
UpperCAmelCase : Union[str, Any] = {'text': 'string'}
UpperCAmelCase : List[Any] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=("train",) ):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
for split in splits:
UpperCAmelCase : Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Any = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : int = TextDatasetReader({'train': text_path} , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = tmp_path / 'cache'
    # the text loader defaults the "text" column to string dtype; explicit features override it
UpperCAmelCase : Tuple = {'text': 'string'}
UpperCAmelCase : Union[str, Any] = features.copy() if features else default_expected_features
UpperCAmelCase : int = (
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : List[Any] = TextDatasetReader({'train': text_path} , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if split:
UpperCAmelCase : int = {split: text_path}
else:
UpperCAmelCase : int = 'train'
UpperCAmelCase : Any = {'train': text_path, 'test': text_path}
UpperCAmelCase : Dict = tmp_path / 'cache'
UpperCAmelCase : Any = {'text': 'string'}
UpperCAmelCase : List[str] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 280 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A_ ( snake_case__ ):
_lowercase : Optional[int] = (DEISMultistepScheduler,)
_lowercase : str = (('num_inference_steps', 2_5),)
def UpperCAmelCase ( self : Optional[Any] , **UpperCAmelCase : Optional[int] ) -> List[str]:
__lowerCAmelCase: int = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Tuple=0 , **UpperCAmelCase : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase: int = dict(self.forward_default_kwargs )
__lowerCAmelCase: List[Any] = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: Dict = self.dummy_sample
__lowerCAmelCase: Optional[Any] = 0.1 * sample
__lowerCAmelCase: List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: str = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Dict = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: int = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase , __lowerCAmelCase: Tuple = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase: int = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: Tuple = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
pass
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[int]=0 , **UpperCAmelCase : int ) -> List[str]:
__lowerCAmelCase: Dict = dict(self.forward_default_kwargs )
__lowerCAmelCase: List[Any] = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = self.dummy_sample
__lowerCAmelCase: Tuple = 0.1 * sample
__lowerCAmelCase: Any = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: List[str] = self.get_scheduler_config()
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase: Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase: Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase: Union[str, Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: int = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Any=None , **UpperCAmelCase : Dict ) -> List[Any]:
if scheduler is None:
__lowerCAmelCase: Union[str, Any] = self.scheduler_classes[0]
__lowerCAmelCase: List[Any] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.scheduler_classes[0]
__lowerCAmelCase: str = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = 1_0
__lowerCAmelCase: int = self.dummy_model()
__lowerCAmelCase: int = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: int = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def UpperCAmelCase ( self : List[str] ) -> int:
__lowerCAmelCase: Optional[Any] = dict(self.forward_default_kwargs )
__lowerCAmelCase: Optional[int] = kwargs.pop('num_inference_steps' , UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Dict = self.get_scheduler_config()
__lowerCAmelCase: int = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Any = self.dummy_sample
__lowerCAmelCase: str = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase , 'set_timesteps' ):
scheduler.set_timesteps(UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase , 'set_timesteps' ):
__lowerCAmelCase: List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowerCAmelCase: List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
__lowerCAmelCase: int = dummy_past_residuals[: scheduler.config.solver_order]
__lowerCAmelCase: Dict = scheduler.timesteps[5]
__lowerCAmelCase: str = scheduler.timesteps[6]
__lowerCAmelCase: List[str] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: Union[str, Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self : Tuple ) -> Dict:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__lowerCAmelCase: Union[str, Any] = DEISMultistepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: int = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: str = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: str = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
self.check_over_configs(thresholding=UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type='deis' , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )
def UpperCAmelCase ( self : Dict ) -> List[str]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> int:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
__lowerCAmelCase: List[Any] = self.full_loop(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
self.check_over_configs(lower_order_final=UpperCAmelCase )
self.check_over_configs(lower_order_final=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase: str = self.full_loop()
__lowerCAmelCase: int = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def UpperCAmelCase ( self : Optional[int] ) -> Any:
__lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' )
__lowerCAmelCase: Union[str, Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: Any = self.scheduler_classes[0]
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
__lowerCAmelCase: List[Any] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Any = 1_0
__lowerCAmelCase: Union[str, Any] = self.dummy_model()
__lowerCAmelCase: List[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Tuple = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
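# Hedged aside (not part of the row above): the obfuscated test helper `full_loop` encodes the
# standard diffusers sampling loop. A minimal de-obfuscated sketch of that loop, assuming only
# the public diffusers API (`DEISMultistepScheduler.set_timesteps` / `.step`); the `denoise`
# name and the bare callable `model` are illustrative:
import torch
from diffusers import DEISMultistepScheduler

def denoise(model, sample: torch.Tensor, num_inference_steps: int = 10) -> torch.Tensor:
    scheduler = DEISMultistepScheduler()
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        residual = model(sample, t)  # model predicts the noise at timestep t
        sample = scheduler.step(residual, t, sample).prev_sample  # one DEIS multistep update
    return sample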
| 322 |
_a = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any ) -> list[str]:
"""simple docstring"""
__lowerCAmelCase: int = set()
# keep track of all the paths to be checked
__lowerCAmelCase: str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__lowerCAmelCase: str = queue.pop(0 )
# get the last node from the path
__lowerCAmelCase: Union[str, Any] = path[-1]
if node not in explored:
__lowerCAmelCase: Dict = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__lowerCAmelCase: Dict = list(SCREAMING_SNAKE_CASE )
new_path.append(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(SCREAMING_SNAKE_CASE )
# in case there's no path between the 2 nodes
return []
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__lowerCAmelCase: Optional[int] = [start]
__lowerCAmelCase: Dict = set(SCREAMING_SNAKE_CASE )
# Keep tab on distances from `start` node.
__lowerCAmelCase: Optional[int] = {start: 0, target: -1}
while queue:
__lowerCAmelCase: Any = queue.pop(0 )
if node == target:
__lowerCAmelCase: Optional[int] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
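# Hedged aside: both helpers above pop from the head of a Python list (`queue.pop(0)`), which
# is O(n) per pop. A sketch of the same BFS frontier using collections.deque for O(1) pops;
# the function name is illustrative, not part of the row above:
from collections import deque

def bfs_order(graph: dict, start: str) -> list[str]:
    seen, order, frontier = {start}, [], deque([start])
    while frontier:
        node = frontier.popleft()  # O(1), unlike list.pop(0)
        order.append(node)
        for neighbour in graph[node]:
            if neighbour not in seen:
                seen.add(neighbour)
                frontier.append(neighbour)
    return order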
| 322 | 1 |
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
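# Hedged aside: `number & 1` isolates the least-significant bit, which Python defines to be 0
# for every even integer, negatives included (and `&` binds tighter than `==`, so the
# expression above parses as `(number & 1) == 0`). Quick illustrative check:
for n in (-4, -3, 0, 7, 10):
    assert ((n & 1) == 0) == (n % 2 == 0)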
| 299 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
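# Hedged aside: the `_LazyModule` registration above defers heavy imports (the torch-backed
# classes) until an attribute is first touched. A stripped-down sketch of that pattern using
# only the standard library; this is NOT the transformers implementation, and the names are
# illustrative:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, attr_to_submodule: dict):
        super().__init__(name)
        self._map = attr_to_submodule  # e.g. {"MMBTModel": ".modeling_mmbt"}

    def __getattr__(self, attr: str):
        # Import the owning submodule only on first access, then resolve the attribute.
        submodule = importlib.import_module(self._map[attr], package=self.__name__)
        return getattr(submodule, attr)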
| 299 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def lowerCAmelCase_ ( snake_case_=None,snake_case_=None ):
return field(default_factory=lambda: default,metadata=snake_case_ )
@dataclass
class lowercase :
_a = field(
metadata={"help": "The csv file to plot."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "Disable logarithmic scale when plotting"},)
_a = field(
default=UpperCamelCase__,metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
},)
_a = field(
default=UpperCamelCase__,metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},)
_a = list_field(
default=UpperCamelCase__,metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def lowerCAmelCase_ ( snake_case_ ):
try:
int(snake_case_ )
return True
except ValueError:
return False
def lowerCAmelCase_ ( snake_case_ ):
try:
float(snake_case_ )
return True
except ValueError:
return False
class lowercase :
def __init__( self , _a ) -> Optional[Any]:
_A : int = args
_A : Optional[Any] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
_A : List[str] = csv.DictReader(_a )
for row in reader:
_A : Optional[Any] = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
_A : Union[str, Any] = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
_A : int = float(row["""result"""] )
def a__ ( self ) -> Any:
_A , _A : Tuple = plt.subplots()
_A : Union[str, Any] = """Time usage""" if self.args.is_time else """Memory usage"""
_A : List[Any] = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_A : Optional[Any] = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
_A : List[Any] = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
_A : List[str] = self.result_dict[model_name]["""result"""]
((_A) , (_A)) : Dict = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_A : Union[str, Any] = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_A : int = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_a , )
else:
_A : int = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_A) , (_A)) : Any = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
_A : List[Any] = np.asarray(_a , _a )[: len(_a )]
plt.scatter(
_a , _a , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(_a , _a , """--""" )
title_str += F''' {label_model_name} vs.'''
_A : Optional[Any] = title_str[:-4]
_A : Optional[int] = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(_a )
plt.xlabel(_a )
plt.ylabel(_a )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def lowerCAmelCase_ ( ):
_A : List[Any] = HfArgumentParser(snake_case_ )
_A : Tuple = parser.parse_args_into_dataclasses()[0]
_A : Dict = Plot(args=snake_case_ )
plot.plot()
if __name__ == "__main__":
main()
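# Hedged aside: HfArgumentParser maps the dataclass fields above straight onto CLI flags; in
# the un-obfuscated upstream script (examples/pytorch/benchmarking/plot_csv_file.py) those
# fields are named csv_file, plot_along_batch, is_time, no_log_scale, is_train,
# figure_png_file and short_model_names, so a typical invocation would be:
#
#   python plot_csv_file.py --csv_file inference_memory.csv \
#       --figure_png_file inference_memory.png --no_log_scale
#
# Each CSV row is expected to carry `model`, `batch_size`, `sequence_length` and `result`
# columns, matching the DictReader keys consumed in __init__ above.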
| 26 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> Optional[int]:
super().__init__(_a )
_A : Union[str, Any] = RobertaEmbeddings(_a )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> str:
super().__init__(_a )
_A : Any = config.num_labels
_A : Dict = config.num_hidden_layers
_A : List[str] = DeeRobertaModel(_a )
_A : int = nn.Dropout(config.hidden_dropout_prob )
_A : int = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_a )
def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any:
_A : Optional[int] = self.num_layers
try:
_A : List[str] = self.roberta(
_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , )
_A : List[str] = outputs[1]
_A : List[str] = self.dropout(_a )
_A : Optional[Any] = self.classifier(_a )
_A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_A : List[Any] = e.message
_A : Optional[int] = e.exit_layer
_A : Optional[int] = outputs[0]
if not self.training:
_A : int = entropy(_a )
_A : int = []
_A : int = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_A : Union[str, Any] = MSELoss()
_A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_A : List[Any] = CrossEntropyLoss()
_A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_A : Optional[Any] = []
for highway_exit in outputs[-1]:
_A : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_A : List[str] = MSELoss()
_A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_A : List[Any] = CrossEntropyLoss()
_A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_a )
if train_highway:
_A : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_A : int = (loss,) + outputs
if not self.training:
_A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_A : Union[str, Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
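# Hedged aside: at inference time the highway heads above exit early once the prediction
# entropy of an intermediate layer drops below a threshold (the `entropy` helper is imported
# from modeling_highway_bert). A minimal sketch of that decision rule; the threshold value and
# names here are illustrative:
import torch

def prediction_entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

def should_exit_early(logits: torch.Tensor, threshold: float = 0.3) -> bool:
    return bool(prediction_entropy(logits).mean() < threshold)  # confident enough -> stop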
| 26 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
])
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() ,encoding="utf-8" ,check=_snake_case ,)
assert hasattr(self ,"env" )
def UpperCamelCase__ ( self ,_snake_case=1 ):
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=f'''{self.env.base_job_name}-single''' ,instance_count=_snake_case ,instance_type=self.instance_type ,debugger_hook_config=_snake_case ,hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,py_version="py36" ,)
def UpperCamelCase__ ( self ,_snake_case ):
TrainingJobAnalytics(_snake_case ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
def UpperCamelCase__ ( self ):
# create estimator
UpperCAmelCase_ : Tuple = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase_ : int = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase_ : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
UpperCAmelCase_ : List[str] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase_ : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" ,99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' ,"w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} ,_snake_case )
| 371 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : int ="mra"
def __init__( self ,_snake_case=5_02_65 ,_snake_case=7_68 ,_snake_case=12 ,_snake_case=12 ,_snake_case=30_72 ,_snake_case="gelu" ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=5_12 ,_snake_case=1 ,_snake_case=0.02 ,_snake_case=1E-5 ,_snake_case="absolute" ,_snake_case=4 ,_snake_case="full" ,_snake_case=0 ,_snake_case=0 ,_snake_case=1 ,_snake_case=0 ,_snake_case=2 ,**_snake_case ,):
super().__init__(pad_token_id=_snake_case ,bos_token_id=_snake_case ,eos_token_id=_snake_case ,**_snake_case )
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : Dict = type_vocab_size
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Tuple = position_embedding_type
UpperCAmelCase_ : Optional[Any] = block_per_row
UpperCAmelCase_ : Any = approx_mode
UpperCAmelCase_ : Dict = initial_prior_first_n_blocks
UpperCAmelCase_ : str = initial_prior_diagonal_n_blocks
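# Hedged aside: like any PretrainedConfig subclass, the MRA config above round-trips through
# JSON. Illustrative usage (no model weights involved; `MraConfig` is the public name of the
# obfuscated class above):
from transformers import MraConfig

config = MraConfig(num_hidden_layers=6, block_per_row=2)  # override two defaults
config.save_pretrained("./mra-small")                     # writes ./mra-small/config.json
assert MraConfig.from_pretrained("./mra-small").block_per_row == 2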
| 67 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
a_ : List[Any] = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None):
SCREAMING_SNAKE_CASE = True
while ask_again:
SCREAMING_SNAKE_CASE = input(_UpperCAmelCase)
try:
if default is not None and len(_UpperCAmelCase) == 0:
return default
return convert_value(_UpperCAmelCase) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=[] , _UpperCAmelCase=None , _UpperCAmelCase=0):
SCREAMING_SNAKE_CASE = BulletMenu(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = menu.run(default_choice=_UpperCAmelCase)
return convert_value(_UpperCAmelCase) if convert_value is not None else result
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = int(_UpperCAmelCase)
return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value])
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = int(_UpperCAmelCase)
return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value])
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = int(_UpperCAmelCase)
return DynamoBackend(DYNAMO_BACKENDS[value]).value
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = int(_UpperCAmelCase)
return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value])
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = int(_UpperCAmelCase)
return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value])
def lowerCamelCase__ (_UpperCAmelCase):
return {"yes": True, "no": False}[value.lower()]
class _snake_case ( argparse.RawDescriptionHelpFormatter ):
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = super()._format_usage(a , a , a , a)
SCREAMING_SNAKE_CASE = usage.replace('<command> [<args>] ' , '')
return usage
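# Hedged aside: each helper above pairs a prompt with a converter and re-asks on conversion
# failure. In upstream accelerate these are named `_ask_field`, `_ask_options` and
# `_convert_*`; an illustrative call following the same pattern (prompt text made up):
#
#   mixed_precision = _ask_field(
#       "Mixed precision (0: no, 1: fp16, 2: bf16, 3: fp8)? ",
#       convert_value=_convert_mixed_precision,
#       error_message="Please enter 0, 1, 2 or 3.",
#   )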
| 137 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : List[Any] = {'vocab_file': 'spiece.model'}
a_ : Dict = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
a_ : Tuple = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
a_ : int = 0
a_ : Optional[int] = 1
a_ : int = 2
a_ : Union[str, Any] = 3
a_ : List[str] = 4
class _snake_case ( A__ ):
_lowercase : List[str] = VOCAB_FILES_NAMES
_lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Union[str, Any] = '''left'''
def __init__( self , a , a=False , a=True , a=False , a="<s>" , a="</s>" , a="<unk>" , a="<sep>" , a="<pad>" , a="<cls>" , a="<mask>" , a=["<eop>", "<eod>"] , a = None , **a , ) -> None:
# Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else mask_token
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , additional_special_tokens=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = remove_space
SCREAMING_SNAKE_CASE = keep_accents
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(a)
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return len(self.sp_model)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(a): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Tuple:
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , a) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Any:
if self.remove_space:
SCREAMING_SNAKE_CASE = ' '.join(inputs.strip().split())
else:
SCREAMING_SNAKE_CASE = inputs
SCREAMING_SNAKE_CASE = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
SCREAMING_SNAKE_CASE = unicodedata.normalize('NFKD' , a)
SCREAMING_SNAKE_CASE = ''.join([c for c in outputs if not unicodedata.combining(a)])
if self.do_lower_case:
SCREAMING_SNAKE_CASE = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[str]:
SCREAMING_SNAKE_CASE = self.preprocess_text(a)
SCREAMING_SNAKE_CASE = self.sp_model.encode(a , out_type=a)
SCREAMING_SNAKE_CASE = []
for piece in pieces:
if len(a) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(piece[:-1].replace(a , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
SCREAMING_SNAKE_CASE = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(a)
else:
new_pieces.append(a)
return new_pieces
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
return self.sp_model.PieceToId(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Tuple:
return self.sp_model.IdToPiece(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> int:
SCREAMING_SNAKE_CASE = ''.join(a).replace(a , ' ').strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self , a , a = False , a = None , a = True , **a , ) -> str:
SCREAMING_SNAKE_CASE = kwargs.pop('use_source_tokenizer' , a)
SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(a , skip_special_tokens=a)
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a))
SCREAMING_SNAKE_CASE = []
sub_texts.append(a)
else:
current_sub_text.append(a)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
SCREAMING_SNAKE_CASE = ''.join(a)
SCREAMING_SNAKE_CASE = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
SCREAMING_SNAKE_CASE = self.clean_up_tokenization(a)
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a)
if token_ids_a is not None:
return ([0] * len(a)) + [1] + ([0] * len(a)) + [1, 1]
return ([0] * len(a)) + [1, 1]
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> Tuple[str]:
if not os.path.isdir(a):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
SCREAMING_SNAKE_CASE = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(a) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , a)
elif not os.path.isfile(self.vocab_file):
with open(a , 'wb') as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(a)
return (out_vocab_file,)
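# Hedged aside: a round trip through the slow XLNet tokenizer above. The checkpoint id comes
# from PRETRAINED_VOCAB_FILES_MAP, and note that build_inputs_with_special_tokens appends
# <sep><cls> at the END of the sequence (padding_side is "left"). Illustrative usage:
#
#   from transformers import XLNetTokenizer
#   tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   ids = tok("Hello world")["input_ids"]            # [..., sep_id, cls_id]
#   text = tok.decode(ids, skip_special_tokens=True)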
| 137 | 1 |
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__lowerCamelCase = datasets.load_iris()
__lowerCamelCase = np.array(data["data"])
__lowerCamelCase = np.array(data["target"])
__lowerCamelCase = data["""target_names"""]
__lowerCamelCase = train_test_split(X, y)
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
return np.linalg.norm(np.array(UpperCamelCase__ ) - np.array(UpperCamelCase__ ) )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=5 ):
"""simple docstring"""
A__ = zip(UpperCamelCase__ , UpperCamelCase__ )
# List of distances of all points from the point to be classified
A__ = []
for data_point in data:
A__ = euclidean_distance(data_point[0] , UpperCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
A__ = [i[1] for i in sorted(UpperCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
A__ = Counter(UpperCamelCase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 359 | """simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase__( __A ):
lowerCAmelCase__ : str = 'AutoTokenizer'
lowerCAmelCase__ : int = ['tokenizer']
lowerCAmelCase__ : int = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> List[str]:
super().__init__(__UpperCAmelCase )
A__ = speaker_embeddings
@classmethod
def snake_case__ ( cls ,__UpperCAmelCase ,__UpperCAmelCase="speaker_embeddings_path.json" ,**__UpperCAmelCase ) -> List[Any]:
if speaker_embeddings_dict_path is not None:
A__ = get_file_from_repo(
__UpperCAmelCase ,__UpperCAmelCase ,subfolder=kwargs.pop('subfolder' ,__UpperCAmelCase ) ,cache_dir=kwargs.pop('cache_dir' ,__UpperCAmelCase ) ,force_download=kwargs.pop('force_download' ,__UpperCAmelCase ) ,proxies=kwargs.pop('proxies' ,__UpperCAmelCase ) ,resume_download=kwargs.pop('resume_download' ,__UpperCAmelCase ) ,local_files_only=kwargs.pop('local_files_only' ,__UpperCAmelCase ) ,use_auth_token=kwargs.pop('use_auth_token' ,__UpperCAmelCase ) ,revision=kwargs.pop('revision' ,__UpperCAmelCase ) ,)
if speaker_embeddings_path is None:
logger.warning(
f'''`{os.path.join(__UpperCAmelCase ,__UpperCAmelCase )}` does not exist
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
A__ = None
else:
with open(__UpperCAmelCase ) as speaker_embeddings_json:
A__ = json.load(__UpperCAmelCase )
else:
A__ = None
A__ = AutoTokenizer.from_pretrained(__UpperCAmelCase ,**__UpperCAmelCase )
return cls(tokenizer=__UpperCAmelCase ,speaker_embeddings=__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase="speaker_embeddings_path.json" ,__UpperCAmelCase="speaker_embeddings" ,__UpperCAmelCase = False ,**__UpperCAmelCase ,) -> Tuple:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__UpperCAmelCase ,__UpperCAmelCase ,'v2' ) ,exist_ok=__UpperCAmelCase )
A__ = {}
A__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A__ = self._load_voice_preset(__UpperCAmelCase )
A__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] ,__UpperCAmelCase ,f'''{prompt_key}_{key}''' ) ,voice_preset[key] ,allow_pickle=__UpperCAmelCase ,)
A__ = os.path.join(__UpperCAmelCase ,f'''{prompt_key}_{key}.npy''' )
A__ = tmp_dict
with open(os.path.join(__UpperCAmelCase ,__UpperCAmelCase ) ,'w' ) as fp:
json.dump(__UpperCAmelCase ,__UpperCAmelCase )
super().save_pretrained(__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase = None ,**__UpperCAmelCase ) -> List[Any]:
A__ = self.speaker_embeddings[voice_preset]
A__ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
A__ = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,__UpperCAmelCase ) ,cache_dir=kwargs.pop('cache_dir' ,__UpperCAmelCase ) ,force_download=kwargs.pop('force_download' ,__UpperCAmelCase ) ,proxies=kwargs.pop('proxies' ,__UpperCAmelCase ) ,resume_download=kwargs.pop('resume_download' ,__UpperCAmelCase ) ,local_files_only=kwargs.pop('local_files_only' ,__UpperCAmelCase ) ,use_auth_token=kwargs.pop('use_auth_token' ,__UpperCAmelCase ) ,revision=kwargs.pop('revision' ,__UpperCAmelCase ) ,)
if path is None:
raise ValueError(
f'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exist
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
A__ = np.load(__UpperCAmelCase )
return voice_preset_dict
def snake_case__ ( self ,__UpperCAmelCase = None ) -> Dict:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase="pt" ,__UpperCAmelCase=2_56 ,__UpperCAmelCase=False ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,**__UpperCAmelCase ,) -> Tuple:
if voice_preset is not None and not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
if (
isinstance(__UpperCAmelCase ,__UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A__ = self._load_voice_preset(__UpperCAmelCase )
else:
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and not voice_preset.endswith('.npz' ):
A__ = voice_preset + '.npz'
A__ = np.load(__UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(__UpperCAmelCase ,**__UpperCAmelCase )
A__ = BatchFeature(data=__UpperCAmelCase ,tensor_type=__UpperCAmelCase )
A__ = self.tokenizer(
__UpperCAmelCase ,return_tensors=__UpperCAmelCase ,padding='max_length' ,max_length=__UpperCAmelCase ,return_attention_mask=__UpperCAmelCase ,return_token_type_ids=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,**__UpperCAmelCase ,)
if voice_preset is not None:
A__ = voice_preset
return encoded_text
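# Hedged aside: typical use of the processor above pairs a text prompt with a named voice
# preset; the checkpoint and preset ids below follow the public Bark convention but are
# illustrative here:
#
#   from transformers import BarkProcessor
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `inputs` carries the tokenized text plus semantic/coarse/fine history prompts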
| 154 | 0 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=30 , _A=2 , _A=3 , _A=True , _A=True , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=10 , _A=0.0_2 , _A=None , ):
__A : str = parent
__A : int = batch_size
__A : Any = image_size
__A : Any = patch_size
__A : Optional[Any] = num_channels
__A : Union[str, Any] = is_training
__A : int = use_labels
__A : Any = hidden_size
__A : Tuple = num_hidden_layers
__A : Any = num_attention_heads
__A : List[Any] = intermediate_size
__A : int = hidden_act
__A : Any = hidden_dropout_prob
__A : List[str] = attention_probs_dropout_prob
__A : Optional[Any] = type_sequence_label_size
__A : Optional[Any] = initializer_range
__A : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__A : List[Any] = (image_size // patch_size) ** 2
__A : Optional[Any] = num_patches + 1
def UpperCAmelCase_ ( self ):
__A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : List[str] = None
if self.use_labels:
__A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : List[str] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Optional[int] = ViTMSNModel(config=_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : str = self.type_sequence_label_size
__A : Optional[Any] = ViTMSNForImageClassification(_A )
model.to(_A )
model.eval()
__A : Dict = model(_A , labels=_A )
print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print('Labels: {labels}' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__A : str = 1
__A : Any = ViTMSNForImageClassification(_A )
model.to(_A )
model.eval()
__A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A : int = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self ):
__A : int = self.prepare_config_and_inputs()
__A , __A , __A : str = config_and_inputs
__A : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _A( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase : str = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Tuple = False
UpperCamelCase : str = False
UpperCamelCase : List[str] = False
def UpperCAmelCase_ ( self ):
__A : Dict = ViTMSNModelTester(self )
__A : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
__A , __A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def UpperCAmelCase_ ( self ):
__A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(_A )
__A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Any = [*signature.parameters.keys()]
__A : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase_ ( self ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def UpperCAmelCase_ ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : str = ViTMSNModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _SCREAMING_SNAKE_CASE ( ) -> Any:
__A : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ):
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ):
torch.manual_seed(2 )
__A : Union[str, Any] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_A )
__A : Dict = self.default_image_processor
__A : Tuple = prepare_img()
__A : List[Any] = image_processor(images=_A , return_tensors='pt' ).to(_A )
# forward pass
with torch.no_grad():
__A : Union[str, Any] = model(**_A )
# verify the logits
__A : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _A )
__A : str = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase : Optional[int] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 280 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = ["""image_processor""", """tokenizer"""]
lowerCAmelCase__ = """BridgeTowerImageProcessor"""
lowerCAmelCase__ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any]):
'''simple docstring'''
super().__init__(__a , __a)
def __call__( self : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict = None , _lowerCAmelCase : Optional[Any] = True , _lowerCAmelCase : Optional[int] = False , _lowerCAmelCase : List[str] = None , _lowerCAmelCase : Optional[Any] = None , _lowerCAmelCase : Union[str, Any] = 0 , _lowerCAmelCase : Dict = None , _lowerCAmelCase : int = None , _lowerCAmelCase : Tuple = None , _lowerCAmelCase : List[Any] = False , _lowerCAmelCase : Optional[int] = False , _lowerCAmelCase : Any = False , _lowerCAmelCase : Any = False , _lowerCAmelCase : str = True , _lowerCAmelCase : Optional[int] = None , **_lowerCAmelCase : Any , ):
'''simple docstring'''
__lowercase =self.tokenizer(
text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel_values + pixel_mask
__lowercase =self.image_processor(
__a , return_tensors=__a , do_normalize=__a , do_center_crop=__a , **__a)
encoding.update(__a)
return encoding
def __lowerCamelCase ( self : Tuple , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : str):
'''simple docstring'''
return self.tokenizer.batch_decode(*__a , **__a)
def __lowerCamelCase ( self : Optional[int] , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
return self.tokenizer.decode(*__a , **__a)
@property
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =self.tokenizer.model_input_names
__lowercase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
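# Hedged aside: the processor above just fans one call out to the tokenizer and the image
# processor and merges the two BatchEncodings. Illustrative usage (checkpoint id and image
# assumed):
#
#   from transformers import BridgeTowerProcessor
#   proc = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   batch = proc(images=image, text="two cats sleeping", return_tensors="pt")
#   # batch: input_ids / attention_mask from the tokenizer + pixel_values (and pixel_mask)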
| 361 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
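# Hedged aside: a worked instance of the formula above, f = 1 / (2 * pi * sqrt(L * C)),
# with illustrative component values:
from math import pi, sqrt

L, C = 0.010, 1e-6                  # 10 mH inductor, 1 uF capacitor
f = 1 / (2 * pi * sqrt(L * C))      # ~= 1591.55 Hz
assert abs(f - 1591.55) < 0.01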
| 48 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__( self : Any , UpperCAmelCase : UNetaDModel , UpperCAmelCase : KarrasVeScheduler ) -> List[str]:
super().__init__()
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self : List[Any] , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 50 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , **UpperCAmelCase : Tuple , ) -> Union[Tuple, ImagePipelineOutput]:
lowerCamelCase__ : Any = self.unet.config.sample_size
lowerCamelCase__ : Any = (batch_size, 3, img_size, img_size)
lowerCamelCase__ : Dict = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
lowerCamelCase__ : Dict = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
lowerCamelCase__ : List[Any] = self.scheduler.schedule[t]
lowerCamelCase__ : Any = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
lowerCamelCase__ , lowerCamelCase__ : Any = self.scheduler.add_noise_to_input(UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
lowerCamelCase__ : Optional[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
lowerCamelCase__ : Dict = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
lowerCamelCase__ : Optional[Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
lowerCamelCase__ : Optional[int] = self.scheduler.step_correct(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , step_output.prev_sample , step_output['derivative'] , )
lowerCamelCase__ : Union[str, Any] = step_output.prev_sample
lowerCamelCase__ : Optional[int] = (sample / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase__ : Union[str, Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase__ : List[Any] = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase )
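# Hedged aside: the pipeline above implements the stochastic sampler (Algorithm 2) of Karras
# et al., "Elucidating the Design Space of Diffusion-Based Generative Models" (2022). A
# minimal call sketch; the checkpoint id is illustrative:
#
#   from diffusers import KarrasVePipeline
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]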
| 50 |
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = 100_0000 ) -> int:
lowerCamelCase__ : int = limit + 1
lowerCamelCase__ : Optional[Any] = [0] * limit
for first_term in range(1 , _UpperCAmelCase ):
for n in range(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase__ : Optional[Any] = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z > 0 and a > d, also a < 4d
lowerCamelCase__ : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 50 | 1 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class a_ :
@property
def __a ( self :Union[str, Any]) -> Any:
return self.get_dummy_input()
@property
def __a ( self :int) -> Any:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
def __a ( self :List[Any] , _lowercase :List[str]=True , _lowercase :str=False , _lowercase :Any=False , _lowercase :str=False , ) -> Optional[int]:
UpperCAmelCase_ = 4
UpperCAmelCase_ = 32
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = torch.device(_lowercase)
UpperCAmelCase_ = (batch_size, num_channels) + sizes
UpperCAmelCase_ = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase)
UpperCAmelCase_ = {'''hidden_states''': hidden_states}
if include_temb:
UpperCAmelCase_ = 128
UpperCAmelCase_ = randn_tensor((batch_size, temb_channels) , generator=_lowercase , device=_lowercase)
if include_res_hidden_states_tuple:
UpperCAmelCase_ = torch.manual_seed(1)
UpperCAmelCase_ = (randn_tensor(_lowercase , generator=_lowercase , device=_lowercase),)
if include_encoder_hidden_states:
UpperCAmelCase_ = floats_tensor((batch_size, 32, 32)).to(_lowercase)
if include_skip_sample:
UpperCAmelCase_ = randn_tensor(((batch_size, 3) + sizes) , generator=_lowercase , device=_lowercase)
return dummy_input
def __a ( self :List[Any]) -> Any:
UpperCAmelCase_ = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
UpperCAmelCase_ = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''')
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def __a ( self :Optional[Any] , _lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = self.block_class(**_lowercase)
unet_block.to(_lowercase)
unet_block.eval()
with torch.no_grad():
UpperCAmelCase_ = unet_block(**_lowercase)
if isinstance(_lowercase , _lowercase):
UpperCAmelCase_ = output[0]
self.assertEqual(output.shape , self.output_shape)
UpperCAmelCase_ = output[0, -1, -3:, -3:]
UpperCAmelCase_ = torch.tensor(_lowercase).to(_lowercase)
assert torch_all_close(output_slice.flatten() , _lowercase , atol=5E-3)
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''')
def __a ( self :List[Any]) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = self.block_class(**_lowercase)
model.to(_lowercase)
model.train()
UpperCAmelCase_ = model(**_lowercase)
if isinstance(_lowercase , _lowercase):
UpperCAmelCase_ = output[0]
UpperCAmelCase_ = torch.device(_lowercase)
UpperCAmelCase_ = randn_tensor(output.shape , device=_lowercase)
UpperCAmelCase_ = torch.nn.functional.mse_loss(_lowercase , _lowercase)
loss.backward()
| 344 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """Factory used to instantiate the `TrainCommand` from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to save the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
| 344 | 1 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class lowercase_ ( a__ ):
def __a ( self ):
UpperCamelCase__ = {}
UpperCamelCase__ = []
UpperCamelCase__ = 1
UpperCamelCase__ = [1, 2]
UpperCamelCase__ = {"a": 1, "b": 2}
UpperCamelCase__ = {"a": [1, 2], "b": [3, 4]}
UpperCamelCase__ = {"a": {"1": 1}, "b": 2}
UpperCamelCase__ = {"a": 1, "b": 2, "c": 3, "d": 4}
UpperCamelCase__ = {}
UpperCamelCase__ = []
UpperCamelCase__ = 2
UpperCamelCase__ = [2, 3]
UpperCamelCase__ = {"a": 2, "b": 3}
UpperCamelCase__ = {"a": [2, 3], "b": [4, 5]}
UpperCamelCase__ = {"a": {"1": 2}, "b": 3}
UpperCamelCase__ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
UpperCamelCase__ = 2
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
UpperCamelCase__ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
UpperCamelCase__ = {"a": 2, "b": 0, "c": 2}
UpperCamelCase__ = {
"a": np.eye(2 ).astype(a ),
"b": np.zeros(3 ).astype(a ),
"c": np.ones(2 ).astype(a ),
}
self.assertEqual(map_nested(a , a , map_numpy=a ) , a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a , a , map_numpy=a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(a , a , map_numpy=a , num_proc=a ) , a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a , a , map_numpy=a , num_proc=a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(a ): # can't pickle a local lambda
            map_nested(lambda a: a + 1 , a , num_proc=a )
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
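# iflatmap_unordered should yield each result as soon as a worker produces it,
# not once the whole map finishes; the timing assertions in the test below
# depend on that behaviour.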
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 80 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
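# Mosaic augmentation: every output image stitches four randomly chosen inputs
# into a single canvas split at a random point, rescaling their YOLO-format
# bounding boxes to the new layout.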
def main() -> None:
    """
    Get images list and annotations list from the input dir,
    build the mosaic images and annotations,
    and save both to the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and their YOLO-format bounding boxes from a label dir."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Stitch four images into one mosaic and rescale their annotations."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 93 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
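# SparkDatasetReader (below) wraps a PySpark DataFrame: in streaming mode the
# DataFrame is iterated directly, otherwise the packaged Spark builder
# materializes the dataset into the cache first.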
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
| 34 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
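# MobileNetV1Config carries the hyperparameters of the original TF model
# (depth multiplier, min_depth, tf_padding); the OnnxConfig below only marks
# the batch dimension as dynamic for export.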
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 34 | 1 |
'''simple docstring'''
from collections.abc import Sequence
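# Greedy single pass: keep the best of (previous best, previous best + current,
# current alone). For subsequences this amounts to summing the positive numbers,
# or taking the single largest element when all are negative.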
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all non-empty subsequences of `nums`."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__A =int(input('Enter number of elements : ').strip())
__A =list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
    print(max_subsequence_sum(array))
| 163 |
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Simulate a quantum half adder and return the measurement counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
__snake_case = half_adder(1, 1)
print(f'Half Adder Output Qubit Counts: {counts}')
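    # inputs 1 and 1 give sum (XOR) = 0 and carry (AND) = 1, so every shot
    # should land on the classical bitstring '10'.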
| 348 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
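# The multi-GPU case below shells out to `torchrun` with one process per
# visible CUDA device, pinning OMP_NUM_THREADS=1 for the child processes.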
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 62 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : str = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case__ : List[Any] = DebertaVaTokenizer
snake_case__ : Any = DebertaVaTokenizerFast
snake_case__ : Union[str, Any] = True
snake_case__ : Tuple = True
def _A ( self : Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase :Tuple = DebertaVaTokenizer(__lowerCamelCase , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self : int , __lowerCamelCase : Union[str, Any] ):
UpperCamelCase :str = """this is a test"""
UpperCamelCase :Dict = """this is a test"""
return input_text, output_text
def _A ( self : Tuple ):
UpperCamelCase :Optional[Any] = """<pad>"""
UpperCamelCase :Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def _A ( self : int ):
UpperCamelCase :Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(__lowerCamelCase ) , 30_001 )
def _A ( self : Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def _A ( self : str ):
# fmt: off
UpperCamelCase :Optional[int] = """ \tHeLLo!how \n Are yoU? """
UpperCamelCase :Any = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
UpperCamelCase :Optional[Any] = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase )
UpperCamelCase :Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase )
UpperCamelCase :List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def _A ( self : Dict ):
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def _A ( self : Optional[Any] ):
pass
def _A ( self : Optional[int] ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :int = DebertaVaTokenizer(__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :int = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Optional[int] = DebertaVaTokenizerFast(__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : int ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :Any = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :Tuple = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Any ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :List[Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase :Tuple = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : str ):
# fmt: off
UpperCamelCase :List[str] = """I was born in 92000, and this is falsé."""
UpperCamelCase :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :List[str] = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[str] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[Any] ):
# fmt: off
UpperCamelCase :Optional[Any] = """ \tHeLLo!how \n Are yoU? """
UpperCamelCase :Dict = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
UpperCamelCase :int = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : int ):
UpperCamelCase :int = self.get_tokenizer()
UpperCamelCase :str = self.get_rust_tokenizer()
UpperCamelCase :Dict = """I was born in 92000, and this is falsé."""
UpperCamelCase :List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
UpperCamelCase :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[str] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
UpperCamelCase :Optional[int] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = self.get_rust_tokenizer()
UpperCamelCase :Tuple = tokenizer.encode(__lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Dict ):
UpperCamelCase :Optional[int] = """This is a test"""
UpperCamelCase :str = [13, 1, 4_398, 25, 21, 1_289]
UpperCamelCase :int = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase :Any = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase :str = DebertaVaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , keep_accents=__lowerCamelCase )
UpperCamelCase :Optional[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Optional[Any] = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# fmt: off
UpperCamelCase :Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :Any = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
UpperCamelCase :Union[str, Any] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
UpperCamelCase :Optional[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase :str = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[int] ):
UpperCamelCase :str = DebertaVaTokenizer(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.encode("""sequence builders""" )
UpperCamelCase :Any = tokenizer.encode("""multi-sequence build""" )
UpperCamelCase :Optional[int] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
UpperCamelCase :str = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __lowerCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __lowerCamelCase , )
@slow
def _A ( self : List[Any] ):
# fmt: off
UpperCamelCase :Union[str, Any] = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 62 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)] )
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job; this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump test results into a json file to share in the PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 110 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class _a :
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
def lowerCamelCase_ ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase__ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase__ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase__ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase__ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 )
lowercase__ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase__ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: int , ) -> Dict:
"""simple docstring"""
lowercase__ = DecisionTransformerModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class _a ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : str = (DecisionTransformerModel,) if is_torch_available() else ()
_lowercase : List[str] = ()
_lowercase : List[Any] = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
_lowercase : Any = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
_lowercase : Tuple = False
_lowercase : str = False
_lowercase : Tuple = False
_lowercase : Optional[Any] = False
_lowercase : Tuple = False
_lowercase : Dict = False
_lowercase : Tuple = False
_lowercase : Optional[Any] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = DecisionTransformerModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: Any ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = DecisionTransformerModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(UpperCamelCase_ )] , UpperCamelCase_ )
@require_torch
class _a ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = 2 # number of steps of autoregressive prediction we will perform
lowercase__ = 10 # defined by the RL environment, may be normalized
lowercase__ = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
lowercase__ = model.to(UpperCamelCase_ )
lowercase__ = model.config
torch.manual_seed(0 )
lowercase__ = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase_ , dtype=torch.floataa ) # env.reset()
lowercase__ = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=UpperCamelCase_ )
lowercase__ = torch.tensor(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowercase__ = state
lowercase__ = torch.zeros(1 , 0 , config.act_dim , device=UpperCamelCase_ , dtype=torch.floataa )
lowercase__ = torch.zeros(1 , 0 , device=UpperCamelCase_ , dtype=torch.floataa )
lowercase__ = torch.tensor(0 , device=UpperCamelCase_ , dtype=torch.long ).reshape(1 , 1 )
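        # autoregressive rollout: append zero placeholders for the next action
        # and reward, let the model predict the action, then fold the (mocked)
        # environment step back into the running state/return/timestep tensors.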
for step in range(UpperCamelCase_ ):
lowercase__ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCamelCase_ )] , dim=1 )
lowercase__ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCamelCase_ )] , dim=1 )
lowercase__ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase__ , lowercase__ , lowercase__ = model(
states=UpperCamelCase_ , actions=UpperCamelCase_ , rewards=UpperCamelCase_ , returns_to_go=UpperCamelCase_ , timesteps=UpperCamelCase_ , attention_mask=UpperCamelCase_ , return_dict=UpperCamelCase_ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase_ , dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase__ = action_pred[0, -1]
lowercase__ = torch.cat([states, state] , dim=1 )
lowercase__ = returns_to_go[0, -1] - reward
lowercase__ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase__ = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCamelCase_ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 110 | 1 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}
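    # self.graph maps each vertex u to a list of [weight, v] pairs, one entry
    # per outgoing edge u -> v.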
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []
    def all_nodes(self):
        return list(self.graph)
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
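    # Iterative DFS: `s` is the start vertex (-2 means "first vertex in the
    # graph"), `d` an optional target; returns the visit order, stopping early
    # when `d` is reached.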
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
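    # cycle_nodes tracks back edges while walking the graph and returns the
    # vertices that lie on some cycle; has_cycle below uses the same walk but
    # bails out with a boolean as soon as a back edge is found.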
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Optional[Any] = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : Dict = -2
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Dict = s
_UpperCAmelCase : str = False
_UpperCAmelCase : Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : int = len(a_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Dict = True
if len(a_ ) != 0:
_UpperCAmelCase : Optional[int] = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : Union[str, Any] = False
indirect_parents.append(a_ )
_UpperCAmelCase : Any = s
_UpperCAmelCase : Any = ss
        # check if we have reached the starting point
if len(a_ ) == 0:
return False
def _snake_case ( self ,a_=-2 ,a_=-1 ) -> Optional[int]:
_UpperCAmelCase : Union[str, Any] = time()
self.dfs(a_ ,a_ )
_UpperCAmelCase : Dict = time()
return end - begin
def _snake_case ( self ,a_=-2 ) -> int:
_UpperCAmelCase : int = time()
self.bfs(a_ )
_UpperCAmelCase : Union[str, Any] = time()
return end - begin
class lowercase :
"""simple docstring"""
def __init__( self ) -> str:
_UpperCAmelCase : List[Any] = {}
def _snake_case ( self ,a_ ,a_ ,a_=1 ) -> Union[str, Any]:
# check if the u exists
if self.graph.get(a_ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_UpperCAmelCase : Optional[int] = [[w, v]]
# add the other way
if self.graph.get(a_ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
_UpperCAmelCase : Optional[int] = [[w, u]]
def _snake_case ( self ,a_ ,a_ ) -> Optional[Any]:
if self.graph.get(a_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(a_ )
# the other way round
if self.graph.get(a_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(a_ )
def _snake_case ( self ,a_=-2 ,a_=-1 ) -> Tuple:
if s == d:
return []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Tuple = []
if s == -2:
_UpperCAmelCase : Dict = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : List[str] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(a_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(a_ ) != 0:
_UpperCAmelCase : Tuple = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : int = ss
        # check if we have reached the starting point
if len(a_ ) == 0:
return visited
def _snake_case ( self ,a_=-1 ) -> List[Any]:
if c == -1:
_UpperCAmelCase : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(a_ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
_UpperCAmelCase : List[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(a_ ,a_ ,1 )
def _snake_case ( self ,a_=-2 ) -> int:
_UpperCAmelCase : Optional[int] = deque()
_UpperCAmelCase : Any = []
if s == -2:
_UpperCAmelCase : Tuple = list(self.graph )[0]
d.append(a_ )
visited.append(a_ )
while d:
_UpperCAmelCase : str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self ,a_ ) -> Tuple:
return len(self.graph[u] )
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Union[str, Any] = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : Tuple = -2
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Optional[Any] = s
_UpperCAmelCase : int = False
_UpperCAmelCase : Any = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : Union[str, Any] = len(a_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Any = True
if len(a_ ) != 0:
_UpperCAmelCase : Any = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : Tuple = False
indirect_parents.append(a_ )
_UpperCAmelCase : Dict = s
_UpperCAmelCase : str = ss
        # check if we have reached the starting point
if len(a_ ) == 0:
return list(a_ )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Tuple = list(self.graph )[0]
stack.append(a_ )
visited.append(a_ )
_UpperCAmelCase : Dict = -2
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : str = s
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : List[str] = len(a_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : Optional[int] = True
if len(a_ ) != 0:
_UpperCAmelCase : Tuple = stack[len(a_ ) - 1]
else:
_UpperCAmelCase : Dict = False
indirect_parents.append(a_ )
_UpperCAmelCase : List[str] = s
_UpperCAmelCase : Union[str, Any] = ss
        # check if we have reached the starting point
if len(a_ ) == 0:
return False
def _snake_case ( self ) -> List[Any]:
return list(self.graph )
def _snake_case ( self ,a_=-2 ,a_=-1 ) -> Optional[int]:
_UpperCAmelCase : Any = time()
self.dfs(a_ ,a_ )
_UpperCAmelCase : Optional[int] = time()
return end - begin
def _snake_case ( self ,a_=-2 ) -> Dict:
_UpperCAmelCase : Dict = time()
self.bfs(a_ )
_UpperCAmelCase : Union[str, Any] = time()
return end - begin
| 369 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" ,"""prajjwal1/bert-tiny""" )
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase : List[Any] = bertabert.config.encoder.vocab_size
_UpperCAmelCase : Optional[int] = tokenizer.sep_token_id
_UpperCAmelCase : Union[str, Any] = tokenizer.cls_token_id
_UpperCAmelCase : str = 128
_UpperCAmelCase : List[str] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""train[:1%]""" )
_UpperCAmelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""validation[:1%]""" )
_UpperCAmelCase : Any = train_dataset.select(range(32 ) )
_UpperCAmelCase : Any = val_dataset.select(range(16 ) )
_UpperCAmelCase : List[Any] = 4
def _map_to_encoder_decoder_inputs(a_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCAmelCase : int = tokenizer(batch["""article"""] ,padding="""max_length""" ,truncation=a_ ,max_length=512 )
_UpperCAmelCase : Tuple = tokenizer(batch["""highlights"""] ,padding="""max_length""" ,truncation=a_ ,max_length=128 )
_UpperCAmelCase : int = inputs.input_ids
_UpperCAmelCase : Union[str, Any] = inputs.attention_mask
_UpperCAmelCase : Union[str, Any] = outputs.input_ids
_UpperCAmelCase : Dict = outputs.input_ids.copy()
_UpperCAmelCase : Dict = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_UpperCAmelCase : Optional[int] = outputs.attention_mask
assert all(len(a_ ) == 512 for x in inputs.input_ids )
assert all(len(a_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(a_ ):
_UpperCAmelCase : Optional[int] = pred.label_ids
_UpperCAmelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : str = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(a_ ) )] ) / len(a_ )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
train_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
# same for validation dataset
_UpperCAmelCase : List[str] = val_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
val_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
_UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : List[str] = SeqaSeqTrainingArguments(
output_dir=a_ ,per_device_train_batch_size=a_ ,per_device_eval_batch_size=a_ ,predict_with_generate=a_ ,evaluation_strategy="""steps""" ,do_train=a_ ,do_eval=a_ ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,)
# instantiate trainer
_UpperCAmelCase : int = SeqaSeqTrainer(
model=a_ ,args=a_ ,compute_metrics=_compute_metrics ,train_dataset=a_ ,eval_dataset=a_ ,tokenizer=a_ ,)
# start training
trainer.train()
| 349 | 0 |
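The graph classes in the row above detect cycles by walking an explicit stack and collecting an "anticipating_nodes" set. As a point of reference, below is a minimal recursive sketch of the same cycle-detection idea using white/gray/black DFS coloring; every name in it is illustrative and none is taken from the snippet.

from collections import defaultdict


def has_cycle(edges):
    """Return True if the directed graph given as (u, v) pairs contains a cycle."""
    graph = defaultdict(list)
    for u, v in edges:
        graph[u].append(v)

    WHITE, GRAY, BLACK = 0, 1, 2  # unvisited / on the current DFS path / finished
    color = defaultdict(int)  # defaults to WHITE

    def visit(node):
        color[node] = GRAY
        for neighbor in graph[node]:
            if color[neighbor] == GRAY:  # back edge into the current path -> cycle
                return True
            if color[neighbor] == WHITE and visit(neighbor):
                return True
        color[node] = BLACK
        return False

    return any(color[node] == WHITE and visit(node) for node in list(graph))


assert has_cycle([(1, 2), (2, 3), (3, 1)]) is True
assert has_cycle([(1, 2), (2, 3)]) is False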
from math import loga
def A ( a ):
    if a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(a , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 182 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
def __init__( self ):
_lowerCAmelCase : Any = """"""
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = 2_5_6
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = 0
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : str = cva.imread(snake_case_ , 0 )
_lowerCAmelCase : List[str] = copy.deepcopy(self.img )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
_lowerCAmelCase : List[Any] = np.sum(snake_case_ )
for i in range(len(snake_case_ ) ):
_lowerCAmelCase : Optional[int] = x[i] / self.k
self.sk += prk
_lowerCAmelCase : Any = (self.L - 1) * self.sk
if self.rem != 0:
_lowerCAmelCase : Dict = int(last % last )
_lowerCAmelCase : str = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case_ )
_lowerCAmelCase : str = int(np.ma.count(self.img ) / self.img[1].size )
_lowerCAmelCase : Union[str, Any] = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
_lowerCAmelCase : Union[str, Any] = self.img[j][i]
if num != self.last_list[num]:
_lowerCAmelCase : List[str] = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def __UpperCamelCase ( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def __UpperCamelCase ( self ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    UpperCamelCase_ = os.path.join(os.path.dirname(__file__), """image_data/input.jpg""")
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309 | 0 |
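The first snippet in the row above counts trailing zero bits using the identity n & -n, which isolates the lowest set bit of n. A self-contained sketch of the same trick with unmangled, illustrative names:

from math import log2


def trailing_zeros(n: int) -> int:
    """Number of trailing zero bits in the binary representation of n."""
    if n < 0:
        raise ValueError("n must be a non-negative integer")
    return 0 if n == 0 else int(log2(n & -n))


assert trailing_zeros(8) == 3   # 0b1000
assert trailing_zeros(12) == 2  # 0b1100
assert trailing_zeros(1) == 0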
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :List[str] , *lowercase_ :Optional[Any] , **lowercase_ :List[Any] )-> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 123 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase :
def __init__( self :str , lowercase_ :str , )-> str:
A__ = parent
A__ = 13
A__ = 7
A__ = True
A__ = True
A__ = False
A__ = True
A__ = 99
A__ = 32
A__ = 2
A__ = 4
A__ = 37
A__ = "gelu"
A__ = 0.1
A__ = 0.1
A__ = 5_12
A__ = 16
A__ = 2
A__ = 0.0_2
A__ = 3
A__ = 4
A__ = None
def UpperCAmelCase_ ( self :Union[str, Any] )-> int:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self :str , lowercase_ :Optional[int] , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :str )-> List[str]:
A__ = TFDistilBertModel(config=lowercase_ )
A__ = {"input_ids": input_ids, "attention_mask": input_mask}
A__ = model(lowercase_ )
A__ = [input_ids, input_mask]
A__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self :List[str] , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] )-> Optional[int]:
A__ = TFDistilBertForMaskedLM(config=lowercase_ )
A__ = {"input_ids": input_ids, "attention_mask": input_mask}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self :Any , lowercase_ :str , lowercase_ :str , lowercase_ :Optional[int] , lowercase_ :str , lowercase_ :List[Any] , lowercase_ :Union[str, Any] )-> Optional[int]:
A__ = TFDistilBertForQuestionAnswering(config=lowercase_ )
A__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :Any , lowercase_ :Dict , lowercase_ :Tuple , lowercase_ :Optional[Any] , lowercase_ :Optional[int] )-> Any:
A__ = self.num_labels
A__ = TFDistilBertForSequenceClassification(lowercase_ )
A__ = {"input_ids": input_ids, "attention_mask": input_mask}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self :str , lowercase_ :Optional[Any] , lowercase_ :List[Any] , lowercase_ :Dict , lowercase_ :Tuple , lowercase_ :int , lowercase_ :Union[str, Any] )-> str:
A__ = self.num_choices
A__ = TFDistilBertForMultipleChoice(lowercase_ )
A__ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
A__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self :str , lowercase_ :Any , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :int , lowercase_ :List[Any] , lowercase_ :Tuple )-> Tuple:
A__ = self.num_labels
A__ = TFDistilBertForTokenClassification(lowercase_ )
A__ = {"input_ids": input_ids, "attention_mask": input_mask}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self :Any )-> Union[str, Any]:
A__ = self.prepare_config_and_inputs()
((A__), (A__), (A__), (A__), (A__), (A__)) = config_and_inputs
A__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
__lowercase = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__lowercase = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowercase = False
__lowercase = False
def UpperCAmelCase_ ( self :Optional[Any] )-> List[Any]:
A__ = TFDistilBertModelTester(self )
A__ = ConfigTester(self , config_class=lowercase_ , dim=37 )
def UpperCAmelCase_ ( self :Tuple )-> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self :int )-> Tuple:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase_ )
def UpperCAmelCase_ ( self :Optional[int] )-> Optional[Any]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase_ )
def UpperCAmelCase_ ( self :str )-> str:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase_ )
def UpperCAmelCase_ ( self :List[str] )-> Dict:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase_ )
def UpperCAmelCase_ ( self :List[str] )-> Optional[int]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase_ )
def UpperCAmelCase_ ( self :str )-> int:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self :List[str] )-> Dict:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
A__ = TFDistilBertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self :List[Any] )-> Any:
A__ = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
A__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ = model(lowercase_ )[0]
A__ = [1, 6, 7_68]
self.assertEqual(output.shape , lowercase_ )
A__ = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1E-4 )
| 123 | 1 |
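The BeitFeatureExtractor shim at the start of the row above is the standard deprecation pattern: the old class name keeps working but emits a warning and defers entirely to its replacement. A minimal self-contained sketch of that pattern, with made-up class names:

import warnings


class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed; "
            "use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


extractor = OldFeatureExtractor(size=256)  # warns, but still works
assert extractor.size == 256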
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __lowerCamelCase ( A__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCamelCase = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
UpperCamelCase = 4
UpperCamelCase = 48
UpperCamelCase = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCamelCase = [6, 6, 6, 6]
UpperCamelCase = 60
UpperCamelCase = [6, 6, 6, 6]
UpperCamelCase = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCamelCase = 4
UpperCamelCase = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
UpperCamelCase = 1
UpperCamelCase = 1
UpperCamelCase = 126
UpperCamelCase = 7
UpperCamelCase = 255.0
UpperCamelCase = ''
return config
def __lowerCamelCase ( A__ , A__ ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
UpperCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
UpperCamelCase = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
UpperCamelCase = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
UpperCamelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
UpperCamelCase = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
UpperCamelCase = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
UpperCamelCase = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
UpperCamelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
UpperCamelCase = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase = 'layernorm.bias'
if "conv_first" in name:
UpperCamelCase = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
UpperCamelCase = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
UpperCamelCase = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
UpperCamelCase = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
UpperCamelCase = name.replace('upsample.2' , 'upsample.convolution_1' )
UpperCamelCase = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
UpperCamelCase = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
UpperCamelCase = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
UpperCamelCase = 'swin2sr.' + name
return name
def __lowerCamelCase ( A__ , A__ ) -> List[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(A__ )
if "qkv" in key:
UpperCamelCase = key.split('.' )
UpperCamelCase = int(key_split[1] )
UpperCamelCase = int(key_split[4] )
UpperCamelCase = config.embed_dim
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
pass
else:
UpperCamelCase = val
return orig_state_dict
def __lowerCamelCase ( A__ , A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = get_config(A__ )
UpperCamelCase = SwinaSRForImageSuperResolution(A__ )
model.eval()
UpperCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' )
UpperCamelCase = convert_state_dict(A__ , A__ )
UpperCamelCase , UpperCamelCase = model.load_state_dict(A__ , strict=A__ )
if len(A__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(A__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"""Unexpected key {key} in state_dict""" )
# verify values
UpperCamelCase = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
UpperCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw ).convert('RGB' )
UpperCamelCase = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
UpperCamelCase = 126 if 'Jpeg' in checkpoint_url else 256
UpperCamelCase = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
UpperCamelCase = transforms(A__ ).unsqueeze(0 )
if config.num_channels == 1:
UpperCamelCase = pixel_values[:, 0, :, :].unsqueeze(1 )
UpperCamelCase = model(A__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
UpperCamelCase = torch.Size([1, 3, 512, 512] )
UpperCamelCase = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCamelCase = torch.Size([1, 3, 1_024, 1_024] )
UpperCamelCase = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
UpperCamelCase = torch.Size([1, 3, 1_024, 1_024] )
UpperCamelCase = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCamelCase = torch.Size([1, 3, 512, 512] )
UpperCamelCase = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCamelCase = torch.Size([1, 3, 1_024, 1_024] )
UpperCamelCase = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , A__ , atol=1e-3 )
print('Looks ok!' )
UpperCamelCase = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
UpperCamelCase = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(A__ )
if push_to_hub:
model.push_to_hub(F"""caidas/{model_name}""" )
processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
_lowerCamelCase : List[str] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=1_3 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : str=[1_0, 2_0, 3_0, 4_0] , UpperCamelCase__ : str=[2, 2, 3, 2] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=3_7 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : Dict=1_0 , UpperCamelCase__ : Union[str, Any]=0.0_2 , UpperCamelCase__ : int=["stage2", "stage3", "stage4"] , UpperCamelCase__ : List[str]=[2, 3, 4] , UpperCamelCase__ : Any=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = num_stages
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_labels
UpperCamelCase = initializer_range
UpperCamelCase = out_features
UpperCamelCase = out_indices
UpperCamelCase = scope
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = ConvNextModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def A ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ):
"""simple docstring"""
UpperCamelCase = ConvNextForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase = None
UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ConvNextModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 )
def A ( self : List[str] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Optional[int] ):
"""simple docstring"""
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def A ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def A ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def A ( self : Optional[int] ):
"""simple docstring"""
pass
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(UpperCamelCase__ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
UpperCamelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def A ( self : Dict ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = ConvNextModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A ( self : Optional[Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase__ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**UpperCamelCase__ )
# verify the logits
UpperCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase , _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (ConvNextBackbone,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = ConvNextConfig
_SCREAMING_SNAKE_CASE = False
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ConvNextModelTester(self )
| 28 | 1 |
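The Swin2SR conversion script in the row above does two things to each checkpoint entry: it renames keys via string substitution and splits fused qkv matrices into separate query/key/value tensors. A toy sketch of both steps follows; the rename table and tensor shapes are illustrative stand-ins, not the real model's:

import torch

RENAMES = {"patch_embed.proj": "embeddings.patch_embeddings.projection"}


def rename_key(name: str) -> str:
    for old, new in RENAMES.items():
        name = name.replace(old, new)
    return name


dim = 2  # stand-in for config.embed_dim
old_state = {
    "patch_embed.proj.weight": torch.zeros(4, 4),
    "attn.qkv.weight": torch.arange(3.0 * dim * dim).reshape(3 * dim, dim),
}
new_state = {}
for key, val in old_state.items():
    if "qkv" in key:
        # fused weight is stacked [query; key; value] along dim 0
        new_state[key.replace("qkv", "query")] = val[:dim, :]
        new_state[key.replace("qkv", "key")] = val[dim : 2 * dim, :]
        new_state[key.replace("qkv", "value")] = val[-dim:, :]
    else:
        new_state[rename_key(key)] = val

assert "embeddings.patch_embeddings.projection.weight" in new_state
assert new_state["attn.value.weight"].shape == (dim, dim)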
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
lowercase__ : Tuple = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : Optional[int] = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_snake_case : bool = field(
default=__magic_name__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
_snake_case : bool = field(
default=__magic_name__ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
_snake_case : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_snake_case : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_snake_case : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : str = field(
default=__magic_name__ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_snake_case : str = field(
default=__magic_name__ , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Train language if it is different from the evaluation language.'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
_snake_case : Optional[bool] = field(
default=__magic_name__ , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
_snake_case : bool = field(
default=__magic_name__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
_snake_case : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_snake_case : bool = field(
default=__magic_name__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_snake_case : bool = field(
default=__magic_name__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def a__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''', lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(lowercase )
datasets.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_UpperCamelCase = load_dataset(
'''xnli''', model_args.language, split='''train''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
_UpperCamelCase = load_dataset(
'''xnli''', model_args.train_language, split='''train''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCamelCase = train_dataset.features['''label'''].names
if training_args.do_eval:
_UpperCamelCase = load_dataset(
'''xnli''', model_args.language, split='''validation''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCamelCase = eval_dataset.features['''label'''].names
if training_args.do_predict:
_UpperCamelCase = load_dataset(
'''xnli''', model_args.language, split='''test''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCamelCase = predict_dataset.features['''label'''].names
# Labels
_UpperCamelCase = len(lowercase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=lowercase, idalabel={str(lowercase ): label for i, label in enumerate(lowercase )}, labelaid={label: i for i, label in enumerate(lowercase )}, finetuning_task='''xnli''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=lowercase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_UpperCamelCase = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCamelCase = False
def preprocess_function(lowercase : Union[str, Any] ):
# Tokenize the texts
return tokenizer(
examples['''premise'''], examples['''hypothesis'''], padding=lowercase, max_length=data_args.max_seq_length, truncation=lowercase, )
if training_args.do_train:
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(lowercase ), data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
_UpperCamelCase = train_dataset.map(
lowercase, batched=lowercase, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on train dataset''', )
# Log a few random samples from the training set:
for index in random.sample(range(len(lowercase ) ), 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_UpperCamelCase = min(len(lowercase ), data_args.max_eval_samples )
_UpperCamelCase = eval_dataset.select(range(lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
_UpperCamelCase = eval_dataset.map(
lowercase, batched=lowercase, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on validation dataset''', )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_UpperCamelCase = min(len(lowercase ), data_args.max_predict_samples )
_UpperCamelCase = predict_dataset.select(range(lowercase ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
_UpperCamelCase = predict_dataset.map(
lowercase, batched=lowercase, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on prediction dataset''', )
# Get the metric function
_UpperCamelCase = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase : EvalPrediction ):
_UpperCamelCase = p.predictions[0] if isinstance(p.predictions, lowercase ) else p.predictions
_UpperCamelCase = np.argmax(lowercase, axis=1 )
return metric.compute(predictions=lowercase, references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCamelCase = default_data_collator
elif training_args.fpaa:
_UpperCamelCase = DataCollatorWithPadding(lowercase, pad_to_multiple_of=8 )
else:
_UpperCamelCase = None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=lowercase, args=lowercase, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=lowercase, tokenizer=lowercase, data_collator=lowercase, )
# Training
if training_args.do_train:
_UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
_UpperCamelCase = trainer.train(resume_from_checkpoint=lowercase )
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase )
)
_UpperCamelCase = min(lowercase, len(lowercase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''', lowercase )
trainer.save_metrics('''train''', lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate(eval_dataset=lowercase )
_UpperCamelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase )
_UpperCamelCase = min(lowercase, len(lowercase ) )
trainer.log_metrics('''eval''', lowercase )
trainer.save_metrics('''eval''', lowercase )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = trainer.predict(lowercase, metric_key_prefix='''predict''' )
_UpperCamelCase = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowercase )
)
_UpperCamelCase = min(lowercase, len(lowercase ) )
trainer.log_metrics('''predict''', lowercase )
trainer.save_metrics('''predict''', lowercase )
_UpperCamelCase = np.argmax(lowercase, axis=1 )
_UpperCamelCase = os.path.join(training_args.output_dir, '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(lowercase, '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowercase ):
_UpperCamelCase = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
| 366 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowercase__ : List[Any] = parse(importlib.metadata.version('torch'))
def a__ ( lowercase : Union[str, Version], lowercase : str, lowercase : str ) -> List[str]:
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
_UpperCamelCase = STR_OPERATION_TO_FUNC[operation]
if isinstance(lowercase, lowercase ):
_UpperCamelCase = parse(importlib.metadata.version(lowercase ) )
return operation(lowercase, parse(lowercase ) )
def a__ ( lowercase : str, lowercase : str ) -> List[Any]:
"""simple docstring"""
return compare_versions(lowercase, lowercase, lowercase )
| 287 | 0 |
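The helper at the end of the row gates behavior on the installed torch version. A small self-contained illustration of the same comparison built on packaging.version; the operator table below is an assumption about what STR_OPERATION_TO_FUNC contains:

import operator

from packaging.version import parse

OPS = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def compare_versions(installed: str, operation: str, required: str) -> bool:
    if operation not in OPS:
        raise ValueError(f"`operation` must be one of {list(OPS)}, received {operation}")
    return OPS[operation](parse(installed), parse(required))


assert compare_versions("2.1.0", ">=", "1.13") is True
assert compare_versions("1.12.1", ">=", "1.13") is False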