| code (string, lengths 81–54k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
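For orientation, a minimal sketch of iterating over rows with this schema, assuming the dump comes from a Hugging Face `datasets` table (the dataset identifier below is hypothetical):

from datasets import load_dataset

# Hypothetical identifier; substitute the real dataset name.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code cell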
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case_( a__ ):
@staticmethod
@abstractmethod
def lowerCamelCase__ ( UpperCamelCase_ : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def lowerCamelCase__ ( self : Optional[Any] ):
raise NotImplementedError()
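A hedged sketch of how a concrete subcommand could plug into the abstract base above (the command name and wiring are hypothetical):

class HelloCommand(BaseCommand):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is expected to be the sub-parsers object of the root ArgumentParser.
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")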
| code_codestyle: 714 |
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPT2Tokenizer,
GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[Any] = 0
@slow
def lowerCamelCase__ ( self : Dict ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(UpperCamelCase_ ) , 0 )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowerCamelCase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
with pytest.raises(UpperCamelCase_ ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowerCamelCase__ ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowerCamelCase__ ( self : Tuple ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values()
lowerCAmelCase : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Any ):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ )
@require_tokenizers
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = '''Hello, world. How are you?'''
lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
# Check we can load the tokenizer config of an online model.
lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' )
lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowerCamelCase__ ( self : Optional[int] ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase__ ( self : str ):
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ )
bert_tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowerCamelCase__ ( self : Optional[int] ):
class NewTokenizer(BertTokenizer):
    special_attribute_present = False
class NewTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = NewTokenizer
    special_attribute_present = False
try:
AutoConfig.register('''custom''' , UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ )
AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowerCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowerCamelCase__ ( self : str ):
with self.assertRaisesRegex(
UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' )
def lowerCamelCase__ ( self : int ):
with self.assertRaisesRegex(
UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' )
def lowerCamelCase__ ( self : Optional[int] ):
# Make sure we have cached the tokenizer.
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| style_context_codestyle: 637 | label: 0 |
"""simple docstring"""
def _snake_case ( _snake_case : Optional[Any] ):
stooge(_snake_case , 0 , len(_snake_case ) - 1 )
return arr
def _snake_case ( _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[Any] ):
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
lowerCAmelCase : Union[str, Any] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
lowerCAmelCase : str = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(_snake_case , _snake_case , (h - t) )
# Recursively sort last 2/3 elements
stooge(_snake_case , i + t , (_snake_case) )
# Recursively sort first 2/3 elements
stooge(_snake_case , _snake_case , (h - t) )
if __name__ == "__main__":
snake_case__ : Any = input('''Enter numbers separated by a comma:\n''').strip()
snake_case__ : Union[str, Any] = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
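Two quick sanity checks for the sort above (stooge sort runs in roughly O(n^2.71) time, so keep inputs small):

assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]
assert stooge_sort([]) == []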
| code_codestyle: 715 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
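# For intuition, a quick check of the byte-level mapping above (illustrative, not part of the module):
#   byte_encoder = bytes_to_unicode()
#   byte_encoder[ord("A")] == "A"        # printable bytes map to themselves
#   byte_encoder[32] == chr(256 + 32)    # space -> "Ġ" (U+0120), the familiar GPT-2 prefix-space marker
#   len(byte_encoder) == 256             # every byte value gets a unique character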
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple="replace" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Tuple="<mask>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Tuple , ):
lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
lowerCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase : Any = json.load(UpperCamelCase_ )
lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase : Optional[int] = errors # how to handle errors in decoding
lowerCAmelCase : List[Any] = bytes_to_unicode()
lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase : Optional[int] = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCamelCase__ ( self : Union[str, Any] ):
return len(self.encoder )
def lowerCamelCase__ ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase : List[str] = tuple(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase : List[Any] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase, lowerCAmelCase : Any = bigram
lowerCAmelCase : Tuple = []
lowerCAmelCase : Any = 0
while i < len(UpperCamelCase_ ):
try:
lowerCAmelCase : int = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase : int = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase : Tuple = tuple(UpperCamelCase_ )
lowerCAmelCase : Tuple = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = ''' '''.join(UpperCamelCase_ )
lowerCAmelCase : List[str] = word
return word
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : Dict = []
for token in re.findall(self.pat , UpperCamelCase_ ):
lowerCAmelCase : Union[str, Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(''' ''' ) )
return bpe_tokens
def lowerCamelCase__ ( self : int , UpperCamelCase_ : str ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[str, Any] ):
return self.decoder.get(UpperCamelCase_ )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Optional[int] = ''''''.join(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')
        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1
        return vocab_file, merge_file
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : Any = [self.cls_token_id]
lowerCAmelCase : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
lowerCAmelCase : Optional[Any] = [self.sep_token_id]
lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=False , **UpperCamelCase_ : Tuple ):
lowerCAmelCase : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
lowerCAmelCase : List[Any] = ''' ''' + text
return (text, kwargs)
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ):
lowerCAmelCase : Dict = super()._pad(
encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
lowerCAmelCase : Tuple = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCAmelCase : Dict = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCAmelCase : List[Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ )
if needs_to_be_padded:
lowerCAmelCase : int = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCAmelCase : Dict = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowerCAmelCase : int = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| style_context_codestyle: 637 | label: 0 |
"""simple docstring"""
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
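A worked example for the function above, hand-checked: -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 rounds to 243.43.

# -1000 up front, then 500 per year for three years at a 10% discount rate:
# net_present_value(0.10, [-1000, 500, 500, 500]) -> 243.43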
| code_codestyle: 716 |
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| style_context_codestyle: 637 | label: 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| code_codestyle: 717 |
"""simple docstring"""
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| style_context_codestyle: 637 | label: 0 |
"""simple docstring"""
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| code_codestyle: 718 |
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| style_context_codestyle: 637 | label: 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
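A hedged usage sketch for the pipeline above (harmonai/maestro-150k is one public Dance Diffusion checkpoint; treat the exact export name and API as assumptions):

# from diffusers import DanceDiffusionPipeline
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
# audio = output.audios[0]  # numpy array, shape (channels, samples)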
| code_codestyle: 719 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_dir):  # `dataset_dir` is a pytest fixture pointing at the dataset script; name assumed
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_dir, '''README.md''')
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2351563,
'''num_examples''': 10000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238418,
'''num_examples''': 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos['''default'''], key), getattr(expected_dataset_infos['''default'''], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| style_context_codestyle: 637 | label: 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class snake_case_( a__ ):
'''simple docstring'''
__UpperCamelCase = '''data2vec-text'''
def __init__( self : int , UpperCamelCase_ : Optional[int]=3_0_5_2_2 , UpperCamelCase_ : List[Any]=7_6_8 , UpperCamelCase_ : Any=1_2 , UpperCamelCase_ : Optional[Any]=1_2 , UpperCamelCase_ : Union[str, Any]=3_0_7_2 , UpperCamelCase_ : Optional[int]="gelu" , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Tuple=5_1_2 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : int=0.02 , UpperCamelCase_ : int=1E-12 , UpperCamelCase_ : str=1 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : List[Any]="absolute" , UpperCamelCase_ : Any=True , UpperCamelCase_ : Any=None , **UpperCamelCase_ : List[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : str = vocab_size
lowerCAmelCase : Union[str, Any] = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : Tuple = hidden_dropout_prob
lowerCAmelCase : Dict = attention_probs_dropout_prob
lowerCAmelCase : Optional[int] = max_position_embeddings
lowerCAmelCase : Any = type_vocab_size
lowerCAmelCase : str = initializer_range
lowerCAmelCase : List[str] = layer_norm_eps
lowerCAmelCase : str = position_embedding_type
lowerCAmelCase : Union[str, Any] = use_cache
lowerCAmelCase : List[str] = classifier_dropout
class snake_case_( a__ ):
'''simple docstring'''
@property
def lowerCamelCase__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
lowerCAmelCase : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
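A minimal usage sketch, assuming the config class above is exported as Data2VecTextConfig (as in transformers):

# from transformers import Data2VecTextConfig
# config = Data2VecTextConfig(hidden_size=768, num_hidden_layers=12)
# config.model_type  # "data2vec-text"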
| code_codestyle: 720 |
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
| style_context_codestyle: 637 | label: 0 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class snake_case_:
def __init__( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=1_0_0 , UpperCamelCase_ : List[str]=1_3 , UpperCamelCase_ : Optional[int]=3_0 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : str=True , UpperCamelCase_ : int=True , UpperCamelCase_ : Optional[Any]=3_2 , UpperCamelCase_ : Union[str, Any]=4 , UpperCamelCase_ : int=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : str="gelu" , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : int=1_0 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Dict=[0, 1, 2, 3] , ):
lowerCAmelCase : Dict = parent
lowerCAmelCase : List[Any] = 1_0_0
lowerCAmelCase : List[str] = batch_size
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : Dict = patch_size
lowerCAmelCase : Any = num_channels
lowerCAmelCase : List[Any] = is_training
lowerCAmelCase : Union[str, Any] = use_labels
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Dict = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : Tuple = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = type_sequence_label_size
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Optional[Any] = scope
lowerCAmelCase : Tuple = out_indices
lowerCAmelCase : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
lowerCAmelCase : List[Any] = num_patches + 1
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Optional[int] = None
if self.use_labels:
lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase__ ( self : Any ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str ):
lowerCAmelCase : Union[str, Any] = BeitModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[str] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple ):
lowerCAmelCase : List[Any] = BeitForMaskedImageModeling(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int ):
lowerCAmelCase : Union[str, Any] = self.type_sequence_label_size
lowerCAmelCase : Union[str, Any] = BeitForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase : List[str] = 1
lowerCAmelCase : str = BeitForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : Tuple = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int ):
lowerCAmelCase : List[str] = self.num_labels
lowerCAmelCase : int = BeitForSemanticSegmentation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : int = model(UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowerCAmelCase : Tuple = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class snake_case_( a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : str = BeitModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=3_7 )
def lowerCamelCase__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def lowerCamelCase__ ( self : str ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowerCamelCase__ ( self : Optional[int] ):
pass
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Any = model_class(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : List[Any] = [*signature.parameters.keys()]
lowerCAmelCase : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
if not self.model_tester.is_training:
return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def lowerCamelCase__ ( self : Optional[Any] ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def lowerCamelCase__ ( self : Union[str, Any] ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def lowerCamelCase__ ( self : Any ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Tuple = BeitModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _snake_case ( ):
lowerCAmelCase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class snake_case_( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : Dict ):
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Union[str, Any] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = self.default_image_processor
lowerCAmelCase : Union[str, Any] = prepare_img()
lowerCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).pixel_values.to(UpperCamelCase_ )
# prepare bool_masked_pos
lowerCAmelCase : List[Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase : List[Any] = model(pixel_values=UpperCamelCase_ , bool_masked_pos=UpperCamelCase_ )
lowerCAmelCase : List[str] = outputs.logits
# verify the logits
lowerCAmelCase : List[Any] = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , UpperCamelCase_ , atol=1E-2 ) )
@slow
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Dict = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = self.default_image_processor
lowerCAmelCase : Union[str, Any] = prepare_img()
lowerCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase : int = model(**UpperCamelCase_ )
lowerCAmelCase : List[Any] = outputs.logits
# verify the logits
lowerCAmelCase : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
lowerCAmelCase : Optional[int] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = self.default_image_processor
lowerCAmelCase : Optional[int] = prepare_img()
lowerCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase : Optional[int] = model(**UpperCamelCase_ )
lowerCAmelCase : Tuple = outputs.logits
# verify the logits
lowerCAmelCase : Tuple = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
lowerCAmelCase : Optional[int] = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Optional[Any] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowerCAmelCase : str = model.to(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = BeitImageProcessor(do_resize=UpperCamelCase_ , size=6_4_0 , do_center_crop=UpperCamelCase_ )
lowerCAmelCase : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowerCAmelCase : int = Image.open(ds[0]['''file'''] )
lowerCAmelCase : Tuple = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase : List[Any] = model(**UpperCamelCase_ )
lowerCAmelCase : Any = outputs.logits
# verify the logits
lowerCAmelCase : List[str] = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , UpperCamelCase_ )
lowerCAmelCase : List[str] = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
lowerCAmelCase : Any = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=UpperCamelCase_ , )
else:
lowerCAmelCase : Tuple = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=UpperCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowerCAmelCase : Optional[int] = model.to(UpperCamelCase_ )
lowerCAmelCase : Dict = BeitImageProcessor(do_resize=UpperCamelCase_ , size=6_4_0 , do_center_crop=UpperCamelCase_ )
lowerCAmelCase : Dict = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowerCAmelCase : str = Image.open(ds[0]['''file'''] )
lowerCAmelCase : List[str] = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(**UpperCamelCase_ )
lowerCAmelCase : List[str] = outputs.logits.detach().cpu()
lowerCAmelCase : str = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(5_0_0, 3_0_0)] )
lowerCAmelCase : List[Any] = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ )
lowerCAmelCase : str = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
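# Hedged sketch (not part of the original test suite): every integration test above
# repeats the same verify-against-reference pattern -- forward pass, shape check, then
# torch.allclose on a small logits slice against hard-coded reference values. The helper
# name and tolerance below are illustrative assumptions, not transformers API:
def _assert_logits_match(logits, expected_shape, expected_slice, atol=1E-4):
    # The shape check catches wiring errors; the slice comparison catches numerical drift.
    assert logits.shape == expected_shape
    assert torch.allclose(logits.flatten()[: expected_slice.numel()] , expected_slice.flatten() , atol=atol )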
| 721
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # the decoder mask deliberately mirrors the encoder mask here, as in the upstream helper
        "decoder_attention_mask": attention_mask,
    }
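# Hedged illustration of the masking rule above (toy values; pad_token_id is 1, matching
# the model tester configuration later in this file):
#     np.where(np.array([[5, 7, 1]]) != 1, 1, 0)  ->  array([[1, 1, 0]])
# Real tokens receive attention weight 1 while padding positions are masked out with 0.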
class snake_case_:
def __init__( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : int=1_3 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Dict=9_9 , UpperCamelCase_ : Optional[int]=1_6 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Any=0.02 , ):
lowerCAmelCase : Tuple = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : Optional[int] = is_training
lowerCAmelCase : int = use_labels
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : str = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : Union[str, Any] = eos_token_id
lowerCAmelCase : Dict = pad_token_id
lowerCAmelCase : Optional[Any] = bos_token_id
lowerCAmelCase : List[str] = initializer_range
    def prepare_config_and_inputs( self ):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=True , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class snake_case_( unittest.TestCase ):
__UpperCamelCase = 99
    def _get_config_and_data( self ):
        input_ids = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCamelCase__ ( self : List[str] ):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape )
def lowerCamelCase__ ( self : Any ):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape )
def lowerCamelCase__ ( self : int ):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class snake_case_( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
__UpperCamelCase = True
__UpperCamelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Any = FlaxBlenderbotModelTester(self )
def lowerCamelCase__ ( self : Tuple ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : List[str] ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : List[str] = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : int = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : Union[str, Any] ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
lowerCAmelCase : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase : List[Any] = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : str = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCAmelCase : int = np.ones((1, 1) ) * model.config.eos_token_id
lowerCAmelCase : List[str] = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
lowerCAmelCase : List[str] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
lowerCAmelCase : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
lowerCAmelCase : List[Any] = ['''Sam''']
lowerCAmelCase : str = tokenizer(UpperCamelCase_ , return_tensors='''jax''' )
lowerCAmelCase : Union[str, Any] = model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Tuple = '''Sam is a great name. It means "sun" in Gaelic.'''
lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ , **UpperCamelCase_ )
assert generated_txt[0].strip() == tgt_text
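# Hedged worked example of shift_tokens_right as exercised in the shift test above
# (pad_token_id=1, decoder_start_token_id=2; values illustrative):
#     input:   [[71, 82, 18, 33, 2, 1, 1]]
#     shifted: [[ 2, 71, 82, 18, 33, 2, 1]]
# The sequence is rotated right by one with the start token prepended, which is why the
# pad count drops by exactly one in that assertion.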
| 637
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
snake_case__ = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 638
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
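# Hedged sketch of the deferred-import behaviour this module relies on. This is an
# illustrative standalone analogue, not the actual _LazyModule implementation:
import importlib

def _lazy_attr(package, module, attr):
    # Resolve `package.module.attr` only when it is actually requested, mirroring how
    # _LazyModule defers heavy torch/tf/flax imports until first attribute access.
    return getattr(importlib.import_module(f"{package}.{module}"), attr)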
| 638
|
def binary_multiply( a: int , b: int ) -> int:
    '''Multiply two non-negative integers using only doubling, addition and bit shifts.'''
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply( a: int , b: int , c: int ) -> int:
    '''Multiply a by b modulo c, keeping every intermediate result reduced mod c.'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
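# Quick hedged sanity checks. The function names above were restored for readability
# (the original file left them anonymous), so adjust these if you rename the functions:
if __name__ == "__main__":
    assert binary_multiply(3 , 9 ) == 27
    assert binary_mod_multiply(3 , 4 , 5 ) == (3 * 4) % 5
    print('''binary multiplication checks passed''' )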
| 638
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = '▁'
snake_case__ = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
snake_case__ = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
snake_case__ = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
snake_case__ = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class UpperCamelCase ( PreTrainedTokenizer ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = ['input_ids', 'attention_mask']
A_ = []
A_ = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
"""simple docstring"""
_lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase = language_codes
_lowerCamelCase = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCamelCase = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCamelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A_ )
for lang_code in fairseq_language_code
if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
_lowerCamelCase = vocab_file
_lowerCamelCase = load_json(A_ )
_lowerCamelCase = {v: k for k, v in self.encoder.items()}
_lowerCamelCase = spm_file
_lowerCamelCase = load_spm(A_ , self.sp_model_kwargs )
_lowerCamelCase = len(self.encoder )
_lowerCamelCase = {
self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ )
}
_lowerCamelCase = {lang_code: self.encoder_size + i for i, lang_code in enumerate(A_ )}
_lowerCamelCase = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCamelCase = src_lang if src_lang is not None else '''en'''
_lowerCamelCase = tgt_lang
_lowerCamelCase = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCamelCase = num_madeup_words
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def _tokenize( self , text: str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
def UpperCamelCase_ ( self , A_ ) -> List[Any]:
"""simple docstring"""
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def UpperCamelCase_ ( self , A_ ) -> str:
"""simple docstring"""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def UpperCamelCase_ ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
_lowerCamelCase = [1] * len(self.prefix_tokens )
_lowerCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
return state
def __setstate__( self , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
_lowerCamelCase = Path(A_ )
if not save_dir.is_dir():
raise OSError(F'{save_directory} should be a directory' )
_lowerCamelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_lowerCamelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , '''wb''' ) as fi:
_lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def UpperCamelCase_ ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
"""simple docstring"""
_lowerCamelCase = src_lang
_lowerCamelCase = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_ , **A_ ) -> Tuple:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_lowerCamelCase = src_lang
_lowerCamelCase = self(A_ , add_special_tokens=A_ , **A_ )
_lowerCamelCase = self.get_lang_id(A_ )
_lowerCamelCase = tgt_lang_id
return inputs
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = self.get_lang_token(A_ )
_lowerCamelCase = self.lang_token_to_id[lang_token]
_lowerCamelCase = [self.cur_lang_id]
_lowerCamelCase = [self.eos_token_id]
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = self.get_lang_token(A_ )
_lowerCamelCase = self.lang_token_to_id[lang_token]
_lowerCamelCase = [self.cur_lang_id]
_lowerCamelCase = [self.eos_token_id]
def UpperCamelCase_ ( self , A_ ) -> str:
"""simple docstring"""
return self.lang_code_to_token[lang]
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path: str ) -> Union[Dict, List]:
    '''simple docstring'''
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json( data , path: str ) -> None:
    '''simple docstring'''
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
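# Hedged usage sketch (requires network access plus the sentencepiece package; the
# checkpoint id comes from PRETRAINED_VOCAB_FILES_MAP above, everything else is
# illustrative):
#     tokenizer = M2M100Tokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''en''' , tgt_lang='''fr''' )
#     batch = tokenizer('''Hello world''' , return_tensors='''pt''' )
# Setting src_lang prepends the matching __en__ language token to the encoded ids via
# set_src_lang_special_tokens above.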
| 638
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs( model ):
    '''simple docstring'''
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    # decoder flags follow the upstream Donut conversion script
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
            model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def rename_key( name ):
    '''simple docstring'''
    if "encoder.model" in name:
        name = name.replace('''encoder.model''' , '''encoder''' )
    if "decoder.model" in name:
        name = name.replace('''decoder.model''' , '''decoder''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if name.startswith('''encoder''' ):
        if "layers" in name:
            name = '''encoder.''' + name
        if "attn.proj" in name:
            name = name.replace('''attn.proj''' , '''attention.output.dense''' )
        if "attn" in name and "mask" not in name:
            name = name.replace('''attn''' , '''attention.self''' )
        if "norm1" in name:
            name = name.replace('''norm1''' , '''layernorm_before''' )
        if "norm2" in name:
            name = name.replace('''norm2''' , '''layernorm_after''' )
        if "mlp.fc1" in name:
            name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
        if "mlp.fc2" in name:
            name = name.replace('''mlp.fc2''' , '''output.dense''' )
        if name == "encoder.norm.weight":
            name = '''encoder.layernorm.weight'''
        if name == "encoder.norm.bias":
            name = '''encoder.layernorm.bias'''
    return name
def convert_state_dict( orig_state_dict , model ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # split the fused qkv projection into separate query/key/value tensors
            if "weight" in key:
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_donut_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    '''simple docstring'''
    # load original model
    original_model = DonutModel.from_pretrained(model_name ).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model )
    encoder = DonutSwinModel(encoder_config )
    decoder = MBartForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # verify results on scanned document
    dataset = load_dataset('''hf-internal-testing/example-documents''' )
    image = dataset['''test'''][0]['''image'''].convert('''RGB''' )
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name , from_slow=True )
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    processor = DonutProcessor(image_processor , tokenizer )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        question = '''When is the coffee break?'''
        task_prompt = task_prompt.replace('''{user_input}''' , question )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = '''<s_rvlcdip>'''
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = '''<s_cord>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = '''s_cord-v2>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = '''<s_zhtrainticket>'''
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = '''hello world'''
    else:
        raise ValueError('''Model name not supported''' )
    prompt_tensors = original_model.decoder.tokenizer(task_prompt , add_special_tokens=False , return_tensors='''pt''' )[
        '''input_ids'''
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values )
    patch_embeddings , _ = model.encoder.embeddings(pixel_values )
    assert torch.allclose(original_patch_embed , patch_embeddings , atol=1E-3 )
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values )
    last_hidden_state = model.encoder(pixel_values ).last_hidden_state
    assert torch.allclose(original_last_hidden_state , last_hidden_state , atol=1E-2 )
    # verify decoder hidden states
    original_logits = original_model(pixel_values , prompt_tensors , None ).logits
    logits = model(pixel_values , decoder_input_ids=prompt_tensors ).logits
    assert torch.allclose(original_logits , logits , atol=1E-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and processor to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
        processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
snake_case__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
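# Example invocation (hedged: assumes this script is saved as convert_donut_to_pytorch.py;
# the dump path is illustrative):
#     python convert_donut_to_pytorch.py \
#         --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#         --pytorch_dump_folder_path ./donut-docvqa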
| 638
| 1
|
snake_case__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    '''Evaluate a fully parenthesized infix expression (single-digit operands) with
    Dijkstra's two-stack algorithm.'''
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i )
        elif i == ")":
            # RULE 4: a closing parenthesis applies the top operator to the top two operands
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1 , num2 )
            operand_stack.push(total )
    # RULE 5: the value left on the operand stack is the result
    return operand_stack.peek()
if __name__ == "__main__":
snake_case__ = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
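# Worked trace for the example above, '(5 + ((4 * 2) * (2 + 3)))' (illustrative):
#     ')' closing 4 * 2   -> push 8
#     ')' closing 2 + 3   -> push 5
#     ')' closing 8 * 5   -> push 40
#     ')' closing 5 + 40  -> push 45, the value left on the operand stack (RULE 5)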
| 638
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 638
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
| 1
|
from typing import Any
class Node:
    def __init__( self , data ) -> None:
        self.data = data
        self.next = None
    def __repr__( self ) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__( self ) -> None:
        self.head = None
    def __iter__( self ) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self ) -> int:
        return sum(1 for _ in self )
    def __repr__( self ) -> str:
        return "->".join([str(item ) for item in self] )
    def __getitem__( self , index ) -> Any:
        if not 0 <= index < len(self ):
            raise ValueError('''list index out of range.''' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self , index , data ) -> None:
        if not 0 <= index < len(self ):
            raise ValueError('''list index out of range.''' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data ) -> None:
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ) -> None:
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ) -> None:
        if not 0 <= index <= len(self ):
            raise IndexError('''list index out of range''' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ) -> None:  # print every node data
        print(self )
    def delete_head( self ) -> Any:
        return self.delete_nth(0 )
    def delete_tail( self ) -> Any:  # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ) -> Any:
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('''List index out of range.''' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ) -> bool:
        return self.head is None
    def reverse( self ) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list( ) -> None:
    '''simple docstring'''
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2( ) -> None:
    '''simple docstring'''
    test_input = [
        -9,
        100,
        Node(7734_5112 ),
        '''dlrow olleH''',
        7,
        5555,
        0,
        -1_9_2.5_5_5_5_5,
        '''Hello, world!''',
        7_7.9,
        Node(10 ),
        None,
        None,
        1_2.2_0,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 1_2.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('''Hello again, world!''' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive demo of the LinkedList operations."""
    from doctest import testmod

    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
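# --- Illustrative sketch (not part of the original test file) ---
# The tests above assume a Node/LinkedList API: insert_head, insert_tail,
# insert_nth, delete_head, delete_tail, delete_nth, reverse, indexing, len()
# and str().  The minimal singly linked list below is a hedged sketch of that
# interface, reconstructed from the behaviour the asserts exercise; it is an
# assumption, not the original implementation.
class SketchNode:
    def __init__(self, data):
        self.data = data
        self.next = None


class SketchLinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __str__(self):
        return "->".join(str(item) for item in self)

    def __getitem__(self, index):
        for i, item in enumerate(self):
            if i == index:
                return item
        raise ValueError("Index out of range.")

    def __setitem__(self, index, data):
        node = self.head
        for _ in range(index):
            node = node.next
        node.data = data

    def insert_nth(self, index, data):
        new_node = SketchNode(data)
        if index == 0:
            new_node.next, self.head = self.head, new_node
            return
        node = self.head
        for _ in range(index - 1):
            node = node.next
        new_node.next, node.next = node.next, new_node

    def insert_head(self, data):
        self.insert_nth(0, data)

    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def delete_nth(self, index=0):
        if not 0 <= index < len(self):
            raise IndexError("List index out of range.")
        if index == 0:
            data, self.head = self.head.data, self.head.next
            return data
        node = self.head
        for _ in range(index - 1):
            node = node.next
        data, node.next = node.next.data, node.next.next
        return data

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):
        return self.delete_nth(len(self) - 1)

    def reverse(self):
        prev, node = None, self.head
        while node:
            node.next, prev, node = prev, node, node.next
        self.head = prev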
| 638
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , A_ , A_ , A_ = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
_lowerCamelCase = timestep
if not torch.is_tensor(A_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(sample.device )
_lowerCamelCase = self.time_proj(A_ )
if self.config.use_timestep_embedding:
_lowerCamelCase = self.time_mlp(A_ )
else:
_lowerCamelCase = timestep_embed[..., None]
_lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase = ()
for downsample_block in self.down_blocks:
_lowerCamelCase , _lowerCamelCase = downsample_block(hidden_states=A_ , temb=A_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase = self.mid_block(A_ , A_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase = down_block_res_samples[-1:]
_lowerCamelCase = down_block_res_samples[:-1]
_lowerCamelCase = upsample_block(A_ , res_hidden_states_tuple=A_ , temb=A_ )
# 5. post-process
if self.out_block:
_lowerCamelCase = self.out_block(A_ , A_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A_ )
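# --- Illustrative sketch (not part of the original file) ---
# When use_timestep_embedding is False, the forward pass above has to turn a
# per-batch (batch, channels) timestep projection into a (batch, channels,
# length) tensor that lines up with the 1-D sample.  A hedged, standalone
# re-enactment of that broadcasting step (the shapes are toy assumptions):
import torch

batch, channels, length = 2, 32, 16
sample = torch.randn(batch, 1, length)
timestep_embed = torch.randn(batch, channels)           # output of the time projection
timestep_embed = timestep_embed[..., None]              # (batch, channels, 1)
timestep_embed = timestep_embed.repeat([1, 1, length])  # (batch, channels, length)
timestep_embed = timestep_embed.broadcast_to(sample.shape[:1] + timestep_embed.shape[1:])
assert timestep_embed.shape == (batch, channels, length)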
| 638
|
def rank_of_matrix(matrix: list) -> int:
    """Return the rank of a matrix via Gaussian elimination (mutates its input)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
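# --- Illustrative example (not part of the original file) ---
# A hedged, self-contained check of the elimination above (the function name
# follows the definition in this file): two independent rows give rank 2,
# while a row that is a multiple of another collapses the rank to 1.
matrix_a = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
matrix_b = [[1.0, 2.0], [2.0, 4.0]]
assert rank_of_matrix(matrix_a) == 2
assert rank_of_matrix(matrix_b) == 1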
| 638
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create the universe of discourse with np.linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
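# --- Illustrative sketch (not part of the original file) ---
# The same operations without skfuzzy: a triangular membership function is
# just a clipped ramp, and the standard (Zadeh) fuzzy union/intersection/
# complement are elementwise max/min/1-x.  A hedged NumPy-only version:
import numpy as np

def tri_mf(x: np.ndarray, a: float, b: float, c: float) -> np.ndarray:
    """Triangular membership rising on [a, b] and falling on [b, c]."""
    left = (x - a) / (b - a)
    right = (c - x) / (c - b)
    return np.clip(np.minimum(left, right), 0.0, 1.0)

xs = np.linspace(0, 75, 75)
young_mf = tri_mf(xs, 0, 25, 50)
middle_mf = tri_mf(xs, 25, 50, 75)
union_mf = np.maximum(young_mf, middle_mf)         # max(µA, µB)
intersection_mf = np.minimum(young_mf, middle_mf)  # min(µA, µB)
complement_mf = 1.0 - young_mf                     # 1 - µA
assert union_mf.max() <= 1.0 and intersection_mf.min() >= 0.0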
| 638
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
snake_case__ = TypeVar('T')
class UpperCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self , A_ , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = None
_lowerCamelCase = len(A_ )
_lowerCamelCase = [any_type for _ in range(self.N )] + arr
_lowerCamelCase = fnc
self.build()
def UpperCamelCase_ ( self ) -> None:
"""simple docstring"""
for p in range(self.N - 1 , 0 , -1 ):
_lowerCamelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , A_ , A_ ) -> None:
"""simple docstring"""
p += self.N
_lowerCamelCase = v
while p > 1:
_lowerCamelCase = p // 2
_lowerCamelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , A_ , A_ ) -> T | None: # noqa: E741
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = l + self.N, r + self.N
_lowerCamelCase = None
while l <= r:
if l % 2 == 1:
_lowerCamelCase = self.st[l] if res is None else self.fn(A_ , self.st[l] )
if r % 2 == 0:
_lowerCamelCase = self.st[r] if res is None else self.fn(A_ , self.st[r] )
_lowerCamelCase , _lowerCamelCase = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
snake_case__ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
snake_case__ = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
snake_case__ = SegmentTree(test_array, min)
snake_case__ = SegmentTree(test_array, max)
snake_case__ = SegmentTree(test_array, lambda a, b: a + b)
def __magic_name__( ) -> None:
'''simple docstring'''
for i in range(len(__UpperCAmelCase ) ):
for j in range(__UpperCAmelCase , len(__UpperCAmelCase ) ):
_lowerCamelCase = reduce(__UpperCAmelCase , test_array[i : j + 1] )
_lowerCamelCase = reduce(__UpperCAmelCase , test_array[i : j + 1] )
_lowerCamelCase = reduce(lambda __UpperCAmelCase , __UpperCAmelCase : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__UpperCAmelCase , __UpperCAmelCase )
assert max_range == max_segment_tree.query(__UpperCAmelCase , __UpperCAmelCase )
assert sum_range == sum_segment_tree.query(__UpperCAmelCase , __UpperCAmelCase )
test_all_segments()
for index, value in test_updates.items():
snake_case__ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
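# --- Illustrative example (not part of the original file) ---
# A hedged usage sketch of the bottom-up segment tree above: query(l, r) is
# inclusive on both ends, and both query and a point update touch at most
# O(log n) nodes.  The SegmentTree name follows the call sites above.
demo_tree = SegmentTree([5, 1, 4, 2], min)
assert demo_tree.query(0, 3) == 1   # min over the whole array
demo_tree.update(1, 10)             # point update: arr[1] = 10
assert demo_tree.query(0, 3) == 2   # new minimum
assert demo_tree.query(0, 0) == 5   # single-element range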
| 638
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda A_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 0
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def __call__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = Tracker(self.dest )(A_ ).parametrized
_lowerCamelCase = Tracker(self.src )(A_ ).parametrized
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(A_ )} operations while'
F' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(F'Transferred from={src_m} to={dest_m}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
_lowerCamelCase = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
_lowerCamelCase = ResNetForImageClassification(__UpperCAmelCase ).eval()
_lowerCamelCase = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
_lowerCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
_lowerCamelCase = F'resnet{"-".join(name.split("resnet" ) )}'
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , )
# we can use the convnext one
_lowerCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = 1000
_lowerCamelCase = (1, num_labels)
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = num_labels
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
_lowerCamelCase = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
        'The name of the model you wish to convert; it must be one of the supported resnet* architectures,'
        ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
snake_case__ = parser.parse_args()
snake_case__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
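# --- Illustrative sketch (not part of the original script) ---
# The Tracker above records leaf modules via forward hooks so that two
# networks can be aligned operation-by-operation.  A hedged, standalone
# miniature of the same idea (the names here are mine, not from the script):
import torch
import torch.nn as nn

def trace_leaf_modules(model: nn.Module, x: torch.Tensor) -> list:
    traced, handles = [], []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf module: no submodules
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    model(x)  # hooks fire in execution order
    for h in handles:
        h.remove()
    return traced

demo_net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
demo_ops = trace_leaf_modules(demo_net, torch.randn(1, 4))
assert [type(m).__name__ for m in demo_ops] == ["Linear", "ReLU", "Linear"]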
| 638
|
def different_signs(num1: int, num2: int) -> bool:
    """Return True if num1 and num2 have opposite signs (XOR sign-bit trick)."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
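# --- Illustrative note (not part of the original file) ---
# Why the XOR trick works: in two's complement the sign lives in the top bit,
# so num1 ^ num2 is negative exactly when the two sign bits differ (Python
# ints behave as if infinitely sign-extended, so this holds for any size).
assert different_signs(1, -1) is True
assert different_signs(-1, 1) is True
assert different_signs(2, 3) is False
assert different_signs(-2, -3) is False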
| 638
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_lowerCamelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , A_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
# load decoder from hub
_lowerCamelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def UpperCamelCase_ ( self , **A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(A_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> Optional[Any]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> int:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = feature_extractor(A_ , return_tensors='''np''' )
_lowerCamelCase = processor(A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = '''This is a test string'''
_lowerCamelCase = processor(text=A_ )
_lowerCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self , A_=(2, 10, 16) , A_=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(A_ )
return np.random.rand(*A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase = processor.decode(A_ )
_lowerCamelCase = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCamelCase = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCamelCase = processor.batch_decode(A_ , A_ )
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as p:
_lowerCamelCase = decoder.decode_beams_batch(A_ , A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 15
_lowerCamelCase = -20.0
_lowerCamelCase = -4.0
_lowerCamelCase = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 2.0
_lowerCamelCase = 5.0
_lowerCamelCase = -20.0
_lowerCamelCase = True
_lowerCamelCase = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder from the hub and the local files in cache are the same
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = processor_wavaveca(A_ , return_tensors='''np''' )
_lowerCamelCase = processor_auto(A_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor_wavaveca.batch_decode(A_ )
_lowerCamelCase = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def UpperCamelCase_ ( A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()[0]
_lowerCamelCase = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
import torch
_lowerCamelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A_ )
_lowerCamelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_lowerCamelCase = iter(A_ )
_lowerCamelCase = next(A_ )
_lowerCamelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_lowerCamelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCamelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_lowerCamelCase = model(A_ ).logits.cpu().numpy()
_lowerCamelCase = processor.decode(logits[0] , output_word_offsets=A_ )
_lowerCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCamelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_lowerCamelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , A_ )
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , output.text )
# output times
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''start_time''' ) )
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''end_time''' ) )
# fmt: off
_lowerCamelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCamelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
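# --- Illustrative sketch (not part of the original test file) ---
# The processor above delegates to pyctcdecode's beam search over per-frame
# CTC logits.  For intuition, plain greedy CTC decoding is just: argmax per
# frame, collapse repeats, drop blanks.  A hedged NumPy-only sketch (the toy
# vocabulary and blank index are assumptions):
import numpy as np

def greedy_ctc(logits: np.ndarray, vocab: list, blank: int = 0) -> str:
    best = logits.argmax(axis=-1)
    kept = [t for i, t in enumerate(best) if t != blank and (i == 0 or t != best[i - 1])]
    return "".join(vocab[t] for t in kept)

demo_vocab = ["_", "a", "b"]  # "_" plays the CTC blank here
demo_logits = np.eye(3)[[1, 1, 0, 2, 2, 2, 0, 1]]  # frames: a a _ b b b _ a
assert greedy_ctc(demo_logits, demo_vocab) == "aba"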
| 638
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
snake_case__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
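# --- Illustrative sketch (not part of the original file) ---
# The _LazyModule above defers the heavy torch/flax imports until an exported
# name is first accessed.  A hedged, minimal re-creation of the pattern with
# PEP 562 module-level __getattr__ (shown standalone; the real _LazyModule
# instead replaces the module object and also handles submodules, dir(), etc.):
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # toy attr -> providing-module map

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])  # imported on first use
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")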
| 638
|
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """Return True if some subset of arr sums to required_sum (classic 0/1 DP)."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
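# --- Illustrative sketch (not part of the original file) ---
# The table above only ever reads row i-1, so it can be collapsed into one
# boolean row; iterating sums downwards keeps each element used at most once.
# A hedged space-optimized variant of the same DP (assumes positive integers):
def is_sum_subset_1d(arr: list, required_sum: int) -> bool:
    reachable = [False] * (required_sum + 1)
    reachable[0] = True  # the empty subset sums to zero
    for value in arr:
        for j in range(required_sum, value - 1, -1):
            reachable[j] = reachable[j] or reachable[j - value]
    return reachable[required_sum]

assert is_sum_subset_1d([2, 4, 6, 8], 5) is False
assert is_sum_subset_1d([2, 4, 6, 8], 14) is True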
| 638
|
import unittest
from knapsack import greedy_knapsack as kp
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_sorted(self) -> None:
        """Greedy knapsack on an already ratio-sorted input."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self) -> None:
        """A negative max_weight must raise."""
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self) -> None:
        """A negative weight must raise."""
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self) -> None:
        """A negative profit must raise."""
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self) -> None:
        """A zero max_weight must raise."""
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self) -> None:
        """Profit and weight lists of different lengths must raise."""
        self.assertRaisesRegex(IndexError, 'The length of profit and weight must be same.')
if __name__ == "__main__":
unittest.main()
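# --- Illustrative sketch (not part of the original test file) ---
# A hedged sketch of the calc_profit behaviour the tests above assume: a
# greedy fractional knapsack that takes items in decreasing profit/weight
# ratio and, if needed, a fraction of the last item that still fits.
def calc_profit_sketch(profit: list, weight: list, max_weight: int) -> float:
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if w <= capacity:
            total, capacity = total + p, capacity - w
        else:
            total += p * capacity / w  # take the fitting fraction and stop
            break
    return total

assert calc_profit_sketch([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210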
| 638
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return how many shards the list-valued entries of gen_kwargs describe."""
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Split num_shards into up to max_num_jobs contiguous, near-equal ranges."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split list-valued gen_kwargs into one dict per job."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Concatenate the list-valued entries of several gen_kwargs dicts."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a copy of gen_kwargs whose list values are shuffled by rng."""
    # Lists of the same length are shuffled with the same permutation
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
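# --- Illustrative example (not part of the original file) ---
# _distribute_shards hands out num_shards // max_num_jobs shards per job and
# gives the first num_shards % max_num_jobs jobs one extra shard, always as
# contiguous ranges; jobs that would receive nothing are simply dropped.
assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]
assert _distribute_shards(num_shards=2, max_num_jobs=4) == [range(0, 1), range(1, 2)]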
| 638
|
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: print a topological order of graph, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)
# Adjacency List of Graph
snake_case__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
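# --- Illustrative sketch (not part of the original file) ---
# The function above is Kahn's algorithm: repeatedly emit a vertex with
# in-degree 0 and decrement its successors.  If fewer than len(graph)
# vertices get emitted, the leftovers all lie on a cycle.  A hedged variant
# that returns the order (or None on a cycle) instead of printing:
from collections import deque

def topological_order(adjacency: dict):
    indegree = {v: 0 for v in adjacency}
    for targets in adjacency.values():
        for t in targets:
            indegree[t] += 1
    ready = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while ready:
        vertex = ready.popleft()
        order.append(vertex)
        for t in adjacency[vertex]:
            indegree[t] -= 1
            if indegree[t] == 0:
                ready.append(t)
    return order if len(order) == len(adjacency) else None

assert topological_order({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}) is not None
assert topological_order({0: [1], 1: [0]}) is None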
| 638
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 2_55 , A_=True , ) -> List[Any]:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_pad
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self , A_ , A_=False ) -> List[str]:
"""simple docstring"""
if not batched:
_lowerCamelCase = image_inputs[0]
if isinstance(A_ , Image.Image ):
_lowerCamelCase , _lowerCamelCase = image.size
else:
_lowerCamelCase , _lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase = int(self.size['''shortest_edge'''] * h / w )
_lowerCamelCase = self.size['''shortest_edge''']
elif w > h:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = self.size['''shortest_edge''']
else:
_lowerCamelCase = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase = max(A_ , key=lambda A_ : item[0] )[0]
_lowerCamelCase = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , '''image_mean''' ) )
self.assertTrue(hasattr(A_ , '''image_std''' ) )
self.assertTrue(hasattr(A_ , '''do_normalize''' ) )
self.assertTrue(hasattr(A_ , '''do_resize''' ) )
self.assertTrue(hasattr(A_ , '''size''' ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
_lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# Initialize image_processings
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase = self.image_processing_class(do_resize=A_ , do_normalize=A_ , do_rescale=A_ )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_lowerCamelCase = image_processing_a.pad(A_ , return_tensors='''pt''' )
_lowerCamelCase = image_processing_a(A_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_lowerCamelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
# prepare image, target and masks_path
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase = YolosImageProcessor(format='''coco_panoptic''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify masks
_lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A_ )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
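# --- Illustrative sketch (not part of the original test file) ---
# get_expected_values above mirrors shortest-edge resizing: scale the image
# so its short side hits size["shortest_edge"] while keeping the aspect
# ratio.  A hedged, standalone version of that computation:
def expected_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge  # square image

assert expected_resize(30, 400) == (18, 240)
assert expected_resize(400, 30) == (240, 18)
assert expected_resize(50, 50) == (18, 18)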
| 638
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__lowercase )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
A_ = Features({'image': Image()} )
A_ = Features({'labels': ClassLabel} )
A_ = "image"
A_ = "labels"
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , A_ ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
_lowerCamelCase = copy.deepcopy(self )
_lowerCamelCase = self.label_schema.copy()
_lowerCamelCase = features[self.label_column]
_lowerCamelCase = label_schema
return task_template
@property
def UpperCamelCase_ ( self ) -> Dict[str, str]:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 638
|
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """Parse raw DPR training data into an evaluation set and a gold data file."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path' , type=str , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
    parser.add_argument(
        '--evaluation_set' , type=str , help='where to store parsed evaluation_set file' , )
    parser.add_argument(
        '--gold_data_path' , type=str , help='where to store parsed gold_data_path file' , )
    args = parser.parse_args()
    with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
        args.gold_data_path , 'w' ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n' )
            gold_file.write('\t'.join(contexts) + '\n' )
if __name__ == "__main__":
main()
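# --- Illustrative example (not part of the original script) ---
# Each DPR record contributes one question line to the evaluation set and a
# tab-joined line of positive-passage titles to the gold file.  A hedged
# re-enactment on a toy record:
demo_record = {"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}, {"title": "Shakespeare"}]}
assert demo_record["question"] + "\n" == "who wrote hamlet\n"
assert "\t".join(c["title"] for c in demo_record["positive_ctxs"]) == "Hamlet\tShakespeare"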
| 638
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
snake_case__ = logging.get_logger(__name__)
snake_case__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
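# --- Illustrative usage sketch (not part of the original file) ---
# Building token type ids for a sentence pair; requires network access to
# download the checkpoint named in the maps above.
if __name__ == "__main__":
    tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
    ids_a = tok.convert_tokens_to_ids(tok.tokenize("hello world"))
    ids_b = tok.convert_tokens_to_ids(tok.tokenize("how are you"))
    type_ids = tok.create_token_type_ids_from_sequences(ids_a, ids_b)
    # [CLS] A [SEP] -> 0s, then B [SEP] -> 1s
    assert type_ids == [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)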
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    # Wraps the CLIP tokenizer and reimplements image preprocessing with
    # torchvision transforms so gradients can flow through the image branch.
    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *unused = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            # wandb.log expects a dict of named values
            wandb.log({"Original Image": wandb.Image(image)})
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
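# --- Illustrative usage sketch (not part of the original file) ---
# Assumes a local VQGAN config/checkpoint; the paths and prompt weights below
# are invented for the demonstration.
if __name__ == "__main__":
    editor = VQGAN_CLIP(
        iterations=20,
        lr=0.02,
        vqgan_config="./vqgan/config.yaml",    # hypothetical path
        vqgan_checkpoint="./vqgan/last.ckpt",  # hypothetical path
    )
    # "prompt:weight" pairs separated by "|", as parsed by process_prompts
    editor.generate(
        pos_prompts="a smiling face:1.0|bright eyes:0.5",
        neg_prompts="blurry:1.0",
        save_intermediate=True,
    )
    editor.make_animation(output_path="./animation.gif")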
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
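# --- Illustrative check (not part of the original file) ---
# Unravelling a flat index into per-dimension indices, row-major: index 7 in
# a (2, 4) grid sits at row 1, column 3, i.e.
#   _flat_idx_to_idx(7, (2, 4)) == (1, 3)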
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges/end_edges indicate whether, from each dimension inward, the
    # start/end index sits at the edge of the corresponding tensor dimension
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
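# --- Illustrative usage sketch (not part of the original file) ---
# Applying a layer over a large batch-like leading dimension in chunks of 4,
# which bounds peak memory while producing the same result as one big call.
if __name__ == "__main__":
    linear = torch.nn.Linear(8, 8)
    x = torch.randn(16, 8)  # the leading 16 is treated as one batch dimension
    chunked = chunk_layer(lambda input: linear(input), {"input": x}, chunk_size=4, no_batch_dims=1)
    assert torch.allclose(chunked, linear(x), atol=1e-6)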
class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes for most of the modules in the network
        # plateau earlier than this
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac: Iterable, ac_prev: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac, ac_prev):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
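# --- Illustrative note (not part of the original file) ---
# With the _LazyModule pattern above, submodules are only imported on first
# attribute access, so e.g.
#
#   from transformers.models.whisper import WhisperConfig   # cheap
#   from transformers.models.whisper import WhisperModel    # triggers the torch-backed import
#
# both resolve through _import_structure rather than eager imports.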
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
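# --- Illustrative usage sketch (not part of the original file) ---
# feature_size is derived from the lag indices plus the extra features:
# input_size * len(lags_sequence) + (time + dynamic real + static real +
# embedded-categorical + 2 * input_size scaling features).
if __name__ == "__main__":
    config = AutoformerConfig(prediction_length=24, num_time_features=2)
    # 1 * 7 lags + (0 embeddings + 0 dynamic real + 2 time + 0 static real + 2 scaling)
    assert config.feature_size == 1 * 7 + 2 + 2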
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first
        # `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
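# --- Illustrative usage sketch (not part of the original file) ---
# Projecting three perfectly correlated 3-D samples (columns) onto their
# first principal axis yields a single row of projected coordinates.
if __name__ == "__main__":
    demo = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [3.0, 6.0, 9.0]])
    projected = principal_component_analysis(demo, dimensions=1)
    assert projected.shape == (1, 3)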
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        projected_data = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, projected_data):
            raise AssertionError
        assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
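# --- Illustrative note (not part of the original file) ---
# For a two-row partition 3, the helper above yields entries shaped like
#   ("3_0", {"id": 30}), ("3_1", {"id": 31})
# i.e. "<partition>_<row index>" ids paired with the row as a plain dict.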
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling_factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Reverse the denoising loop; only deterministic with DDIM
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
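# --- Illustrative usage sketch (not part of the original file) ---
# Loading a pretrained audio-diffusion pipeline and generating one sample;
# the checkpoint name is an assumption and may differ in your setup.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")  # assumed checkpoint
    images, (sample_rate, audios) = pipe(batch_size=1, return_dict=False)
    print(sample_rate, audios[0].shape, images[0].size)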
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
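# --- Illustrative invocation (not part of the original file) ---
# Converting the small DINO ViT-S/16 backbone and saving it locally:
#
#   python convert_vit_dino_to_pytorch.py \
#       --model_name dino_vits16 \
#       --pytorch_dump_folder_path ./dino_vits16_converted
#
# (The script file name above is an assumption; with --base_model set by
# default, only backbone weights are converted, without a classification head.)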
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 638
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase = ''''''
else:
_lowerCamelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
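# Illustrative note: timm stores the attention input projection as one fused
# qkv matrix of shape (3 * hidden_size, hidden_size). The slices above split it
# into equal thirds, e.g. for hidden_size = 768:
#   rows [0, 768)     -> query weight
#   rows [768, 1536)  -> key weight
#   rows [1536, 2304) -> value weight
# and the fused bias vector is split the same way.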
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = dct.pop(__UpperCAmelCase )
_lowerCamelCase = val
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
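# Note: 000000039769.jpg is the standard COCO validation image (two cats on a
# couch) used as a smoke test across these conversion scripts.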
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True ) -> str:
'''simple docstring'''
_lowerCamelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCamelCase = 8
# set labels if required
if not base_model:
_lowerCamelCase = 1000
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCamelCase = 384
_lowerCamelCase = 1536
_lowerCamelCase = 12
_lowerCamelCase = 6
# load original model from torch hub
_lowerCamelCase = torch.hub.load('''facebookresearch/dino:main''' , __UpperCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__UpperCAmelCase )
_lowerCamelCase = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# load HuggingFace model
if base_model:
_lowerCamelCase = ViTModel(__UpperCAmelCase , add_pooling_layer=__UpperCAmelCase ).eval()
else:
_lowerCamelCase = ViTForImageClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCamelCase = ViTImageProcessor()
_lowerCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowerCamelCase = encoding['''pixel_values''']
_lowerCamelCase = model(__UpperCAmelCase )
if base_model:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCAmelCase , outputs.logits , atol=1E-3 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
snake_case__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 638
| 1
|
from __future__ import annotations
import math
import random
from typing import Any
class UpperCamelCase :
'''simple docstring'''
def __init__( self ) -> None:
"""simple docstring"""
_lowerCamelCase = []
_lowerCamelCase = 0
_lowerCamelCase = 0
def UpperCamelCase_ ( self ) -> bool:
"""simple docstring"""
return self.head == self.tail
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
self.data.append(A_ )
_lowerCamelCase = self.tail + 1
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.data[self.head]
_lowerCamelCase = self.head + 1
return ret
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return self.tail - self.head
def UpperCamelCase_ ( self ) -> None:
"""simple docstring"""
print(self.data )
print('''**************''' )
print(self.data[self.head : self.tail] )
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = data
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = 1
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
return self.data
def UpperCamelCase_ ( self ) -> MyNode | None:
"""simple docstring"""
return self.left
def UpperCamelCase_ ( self ) -> MyNode | None:
"""simple docstring"""
return self.right
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return self.height
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = data
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = node
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = node
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = height
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
if node is None:
return 0
return node.get_height()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
if a > b:
return a
return b
def __magic_name__( __UpperCAmelCase ) -> MyNode:
'''simple docstring'''
print('''left rotation node:''' , node.get_data() )
_lowerCamelCase = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(__UpperCAmelCase )
_lowerCamelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCAmelCase )
_lowerCamelCase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__UpperCAmelCase )
return ret
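# Illustrative sketch: the rotation above promotes the left child B of node A
# and hands B's right subtree E back to A:
#         A                B
#        / \              / \
#       B   C    =>      D   A
#      / \                  / \
#     D   E                E   C
# The heights of A and then B are recomputed once the links are rewired.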
def __magic_name__( __UpperCAmelCase ) -> MyNode:
'''simple docstring'''
print('''right rotation node:''' , node.get_data() )
_lowerCamelCase = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(__UpperCAmelCase )
_lowerCamelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCAmelCase )
_lowerCamelCase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__UpperCAmelCase )
return ret
def __magic_name__( __UpperCAmelCase ) -> MyNode:
'''simple docstring'''
_lowerCamelCase = node.get_left()
assert left_child is not None
node.set_left(left_rotation(__UpperCAmelCase ) )
return right_rotation(__UpperCAmelCase )
def __magic_name__( __UpperCAmelCase ) -> MyNode:
'''simple docstring'''
_lowerCamelCase = node.get_right()
assert right_child is not None
node.set_right(right_rotation(__UpperCAmelCase ) )
return left_rotation(__UpperCAmelCase )
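# Rebalancing overview (illustrative): an insertion can unbalance a node in one
# of four ways, and insert_node below picks the matching fix:
#   left-left   -> right_rotation(node)
#   left-right  -> lr_rotation(node)  (rotate the left child first, then the node)
#   right-left  -> rl_rotation(node)  (rotate the right child first, then the node)
#   right-right -> left_rotation(node)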
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> MyNode | None:
'''simple docstring'''
if node is None:
return MyNode(__UpperCAmelCase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , __UpperCAmelCase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
_lowerCamelCase = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
_lowerCamelCase = right_rotation(__UpperCAmelCase )
else:
_lowerCamelCase = lr_rotation(__UpperCAmelCase )
else:
node.set_right(insert_node(node.get_right() , __UpperCAmelCase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
_lowerCamelCase = node.get_right()
assert right_child is not None
if data < right_child.get_data():
_lowerCamelCase = rl_rotation(__UpperCAmelCase )
else:
_lowerCamelCase = left_rotation(__UpperCAmelCase )
_lowerCamelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCAmelCase )
return node
def __magic_name__( __UpperCAmelCase ) -> Any:
'''simple docstring'''
while True:
_lowerCamelCase = root.get_right()
if right_child is None:
break
_lowerCamelCase = right_child
return root.get_data()
def __magic_name__( __UpperCAmelCase ) -> Any:
'''simple docstring'''
while True:
_lowerCamelCase = root.get_left()
if left_child is None:
break
_lowerCamelCase = left_child
return root.get_data()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> MyNode | None:
'''simple docstring'''
_lowerCamelCase = root.get_left()
_lowerCamelCase = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
_lowerCamelCase = get_left_most(__UpperCAmelCase )
root.set_data(__UpperCAmelCase )
root.set_right(del_node(__UpperCAmelCase , __UpperCAmelCase ) )
elif left_child is not None:
_lowerCamelCase = left_child
elif right_child is not None:
_lowerCamelCase = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('''No such data''' )
return root
else:
root.set_left(del_node(__UpperCAmelCase , __UpperCAmelCase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(__UpperCAmelCase , __UpperCAmelCase ) )
if get_height(__UpperCAmelCase ) - get_height(__UpperCAmelCase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
_lowerCamelCase = left_rotation(__UpperCAmelCase )
else:
_lowerCamelCase = rl_rotation(__UpperCAmelCase )
elif get_height(__UpperCAmelCase ) - get_height(__UpperCAmelCase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
_lowerCamelCase = right_rotation(__UpperCAmelCase )
else:
_lowerCamelCase = lr_rotation(__UpperCAmelCase )
_lowerCamelCase = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(__UpperCAmelCase )
return root
class UpperCamelCase :
'''simple docstring'''
def __init__( self ) -> None:
"""simple docstring"""
_lowerCamelCase = None
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return get_height(self.root )
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
print('''insert:''' + str(A_ ) )
_lowerCamelCase = insert_node(self.root , A_ )
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
print('''delete:''' + str(A_ ) )
if self.root is None:
print('''Tree is empty!''' )
return
_lowerCamelCase = del_node(self.root , A_ )
    def __str__( self , ) -> str: # a level traversal gives a more intuitive look at the tree
"""simple docstring"""
_lowerCamelCase = ''''''
_lowerCamelCase = MyQueue()
q.push(self.root )
_lowerCamelCase = self.get_height()
if layer == 0:
return output
_lowerCamelCase = 0
while not q.is_empty():
_lowerCamelCase = q.pop()
_lowerCamelCase = ''' ''' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(A_ )
q.push(A_ )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
_lowerCamelCase = cnt + 1
for i in range(1_00 ):
if cnt == math.pow(2 , A_ ) - 1:
_lowerCamelCase = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__( ) -> None:
'''simple docstring'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
    snake_case__ = AVLtree()
    snake_case__ = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
| 638
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
)
_lowerCamelCase = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load_from_json(__UpperCAmelCase )
            # important: change the bos & pad token ids, since the CTC symbol is <pad>
            # and not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 42
_lowerCamelCase = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
_lowerCamelCase = UniSpeechForCTC(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCamelCase = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 638
| 1
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
snake_case__ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
snake_case__ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
_lowerCamelCase = bs[:]
_lowerCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCAmelCase )
cs.append(2**8 + n )
n += 1
_lowerCamelCase = [chr(__UpperCAmelCase ) for n in cs]
return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) )
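# Illustrative note: this table maps every byte 0..255 to a printable unicode
# character so BPE can treat arbitrary bytes as text. Printable bytes map to
# themselves (byte 97 -> "a"), while the rest are shifted past 255: byte 0
# maps to chr(256) and the space byte 32 maps to chr(288), i.e. "Ġ".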
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = set()
_lowerCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCamelCase = char
return pairs
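# Worked example: get_pairs("hello") returns
#   {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
# i.e. the set of adjacent symbol pairs in the word.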
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['input_ids', 'attention_mask']
def __init__( self , A_ , A_ , A_="replace" , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=False , **A_ , ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='''utf-8''' ) as vocab_handle:
_lowerCamelCase = json.load(A_ )
_lowerCamelCase = {v: k for k, v in self.encoder.items()}
_lowerCamelCase = errors # how to handle errors in decoding
_lowerCamelCase = bytes_to_unicode()
_lowerCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='''utf-8''' ) as merges_handle:
_lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1]
_lowerCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {}
_lowerCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCamelCase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return len(self.encoder )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase_ ( self , A_ ) -> Dict:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_lowerCamelCase = tuple(A_ )
_lowerCamelCase = get_pairs(A_ )
if not pairs:
return token
while True:
_lowerCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCamelCase , _lowerCamelCase = bigram
_lowerCamelCase = []
_lowerCamelCase = 0
while i < len(A_ ):
try:
_lowerCamelCase = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCamelCase = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCamelCase = tuple(A_ )
_lowerCamelCase = new_word
if len(A_ ) == 1:
break
else:
_lowerCamelCase = get_pairs(A_ )
_lowerCamelCase = ''' '''.join(A_ )
_lowerCamelCase = word
return word
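    # Worked example (illustrative): with bpe_ranks = {("l", "l"): 0}, calling
    # bpe("hello") finds the pairs {("h","e"), ("e","l"), ("l","l"), ("l","o")},
    # merges the only ranked pair into "ll", finds no further ranked pairs, and
    # returns "h e ll o" (the merged symbols joined by spaces).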
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = []
for token in re.findall(self.pat , A_ ):
_lowerCamelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(''' ''' ) )
return bpe_tokens
def UpperCamelCase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase_ ( self , A_ ) -> str:
"""simple docstring"""
return self.decoder.get(A_ )
def UpperCamelCase_ ( self , A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = ''''''.join(A_ )
_lowerCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(A_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '''\n''' )
_lowerCamelCase = 0
with open(A_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
_lowerCamelCase = token_index
writer.write(''' '''.join(A_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def UpperCamelCase_ ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self , A_ , A_=False , **A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
_lowerCamelCase = ''' ''' + text
return (text, kwargs)
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def UpperCamelCase_ ( self , A_ ) -> List[int]:
"""simple docstring"""
_lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix with a space, as is done within Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(A_ )
_lowerCamelCase = ''' '''.join(A_ )
_lowerCamelCase = self.encode(A_ )
if len(A_ ) > self.model_max_length:
_lowerCamelCase = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
| 638
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_ )
| 638
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 'marian'
A_ = ['past_key_values']
A_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , A_=5_81_01 , A_=None , A_=10_24 , A_=12 , A_=40_96 , A_=16 , A_=12 , A_=40_96 , A_=16 , A_=0.0 , A_=0.0 , A_=True , A_=True , A_="gelu" , A_=10_24 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=5_81_00 , A_=False , A_=5_81_00 , A_=0 , A_=0 , A_=True , **A_ , ) -> int:
"""simple docstring"""
_lowerCamelCase = vocab_size
_lowerCamelCase = decoder_vocab_size or vocab_size
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = d_model
_lowerCamelCase = encoder_ffn_dim
_lowerCamelCase = encoder_layers
_lowerCamelCase = encoder_attention_heads
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = dropout
_lowerCamelCase = attention_dropout
_lowerCamelCase = activation_dropout
_lowerCamelCase = activation_function
_lowerCamelCase = init_std
_lowerCamelCase = encoder_layerdrop
_lowerCamelCase = decoder_layerdrop
_lowerCamelCase = use_cache
_lowerCamelCase = encoder_layers
_lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCamelCase = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_lowerCamelCase = {0: '''batch'''}
_lowerCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
_lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(A_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCamelCase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_lowerCamelCase , _lowerCamelCase = self.num_layers
for i in range(A_ ):
_lowerCamelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
_lowerCamelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
_lowerCamelCase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase = super().outputs
else:
_lowerCamelCase = super(A_ , self ).outputs
if self.use_past:
_lowerCamelCase , _lowerCamelCase = self.num_layers
for i in range(A_ ):
_lowerCamelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
_lowerCamelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def UpperCamelCase_ ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_lowerCamelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
A_ , A_ , A_ , A_ , A_ )
# Generate decoder inputs
_lowerCamelCase = seq_length if not self.use_past else 1
_lowerCamelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
A_ , A_ , A_ , A_ , A_ )
_lowerCamelCase = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_lowerCamelCase = dict(**A_ , **A_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_lowerCamelCase , _lowerCamelCase = common_inputs['''input_ids'''].shape
_lowerCamelCase = common_inputs['''decoder_input_ids'''].shape[1]
_lowerCamelCase , _lowerCamelCase = self.num_attention_heads
_lowerCamelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCamelCase = decoder_seq_length + 3
_lowerCamelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCamelCase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(A_ , A_ )] , dim=1 )
_lowerCamelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCamelCase , _lowerCamelCase = self.num_layers
_lowerCamelCase = min(A_ , A_ )
_lowerCamelCase = max(A_ , A_ ) - min_num_layers
_lowerCamelCase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(A_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(A_ ),
torch.zeros(A_ ),
torch.zeros(A_ ),
torch.zeros(A_ ),
) )
# TODO: test this.
_lowerCamelCase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(A_ , A_ ):
common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) )
return common_inputs
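    # Shape note (illustrative): each past_key_values entry holds key and value
    # tensors of shape (batch, num_heads, past_sequence_length,
    # hidden_size // num_heads) per layer, matching the encoder_shape and
    # decoder_shape tuples built above.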
def UpperCamelCase_ ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_lowerCamelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
A_ , A_ , A_ , A_ , A_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_lowerCamelCase , _lowerCamelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_lowerCamelCase = seqlen + 2
_lowerCamelCase , _lowerCamelCase = self.num_layers
_lowerCamelCase , _lowerCamelCase = self.num_attention_heads
_lowerCamelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCamelCase = common_inputs['''attention_mask'''].dtype
_lowerCamelCase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
_lowerCamelCase = [
(torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ )
]
return common_inputs
def UpperCamelCase_ ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]:
"""simple docstring"""
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCamelCase = tokenizer.num_special_tokens_to_add(A_ )
_lowerCamelCase = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ )
# Generate dummy inputs according to compute batch and sequence
_lowerCamelCase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCamelCase = dict(tokenizer(A_ , return_tensors=A_ ) )
return common_inputs
def UpperCamelCase_ ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
else:
_lowerCamelCase = self._generate_dummy_inputs_for_causal_lm(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
return common_inputs
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ )
else:
_lowerCamelCase = super(A_ , self )._flatten_past_key_values_(
A_ , A_ , A_ , A_ )
@property
def UpperCamelCase_ ( self ) -> float:
"""simple docstring"""
return 1E-4
| 638
|
import argparse
import json
import subprocess
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
_lowerCamelCase = subprocess.run(__UpperCAmelCase , shell=__UpperCAmelCase , stdout=subprocess.PIPE )
_lowerCamelCase = output.stdout.decode('''utf-8''' )
_lowerCamelCase = json.loads(__UpperCAmelCase )
_lowerCamelCase = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__UpperCAmelCase )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
_lowerCamelCase = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
return values.split(''',''' )
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
snake_case__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 638
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 638
| 1
|
from __future__ import annotations
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = text, pattern
_lowerCamelCase , _lowerCamelCase = len(A_ ), len(A_ )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def UpperCamelCase_ ( self ) -> list[int]:
"""simple docstring"""
# searches pattern in text and returns index positions
_lowerCamelCase = []
for i in range(self.textLen - self.patLen + 1 ):
_lowerCamelCase = self.mismatch_in_text(A_ )
if mismatch_index == -1:
positions.append(A_ )
else:
_lowerCamelCase = self.match_in_pattern(self.text[mismatch_index] )
_lowerCamelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
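# Worked example (illustrative): for text = "ABAABA" and pattern = "AB", the
# intended scan tries alignments i = 0..4 and keeps the mismatch-free ones,
# so the driver code below prints positions [0, 3].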
snake_case__ = 'ABAABA'
snake_case__ = 'AB'
snake_case__ = BoyerMooreSearch(text, pattern)
snake_case__ = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 638
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
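# Worked example: multiplying 5 * 3 by doubling and adding:
#   b = 3 (0b11): low bit set -> res = 0 + 5 = 5;   a -> 10, b -> 1
#   b = 1 (0b01): low bit set -> res = 5 + 10 = 15; a -> 20, b -> 0
# so the helper returns 15.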
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase = 0
while b > 0:
if b & 1:
_lowerCamelCase = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
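# Worked example: the modular variant keeps every partial sum reduced. For
# a = 5, b = 3, c = 7: res = ((0 % 7) + (5 % 7)) % 7 = 5, then
# res = ((5 % 7) + (10 % 7)) % 7 = 1, and indeed 5 * 3 % 7 == 1.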
| 638
| 1
|
import numpy as np
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(__UpperCAmelCase )
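# Note: vector * sigmoid(vector) is the sigmoid linear unit (SiLU / swish)
# activation. For example, at x = 1.0 it returns 1.0 * sigmoid(1.0) ~= 0.7311,
# and at x = 0.0 it returns 0.0.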
if __name__ == "__main__":
import doctest
doctest.testmod()
| 638
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = model.config
_lowerCamelCase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
_lowerCamelCase = MBartConfig(
is_decoder=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , add_cross_attention=__UpperCAmelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__UpperCAmelCase , add_final_layer_norm=__UpperCAmelCase , )
return encoder_config, decoder_config
def __magic_name__( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if "encoder.model" in name:
_lowerCamelCase = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
_lowerCamelCase = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_lowerCamelCase = '''encoder.''' + name
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_lowerCamelCase = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_lowerCamelCase = '''encoder.layernorm.bias'''
return name
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(__UpperCAmelCase )
if "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[3] )
_lowerCamelCase = int(key_split[5] )
_lowerCamelCase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
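            # Note (illustrative): val holds the fused qkv projection; the
            # branches below split it into equal thirds: [:dim] -> query,
            # [dim : 2 * dim] -> key, [-dim:] -> value.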
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[dim : dim * 2, :]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[:dim]
_lowerCamelCase = val[dim : dim * 2]
_lowerCamelCase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_lowerCamelCase = val
return orig_state_dict
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase = DonutModel.from_pretrained(__UpperCAmelCase ).eval()
# load HuggingFace model
_lowerCamelCase , _lowerCamelCase = get_configs(__UpperCAmelCase )
_lowerCamelCase = DonutSwinModel(__UpperCAmelCase )
_lowerCamelCase = MBartForCausalLM(__UpperCAmelCase )
_lowerCamelCase = VisionEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
model.eval()
_lowerCamelCase = original_model.state_dict()
_lowerCamelCase = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# verify results on scanned document
_lowerCamelCase = load_dataset('''hf-internal-testing/example-documents''' )
_lowerCamelCase = dataset['''test'''][0]['''image'''].convert('''RGB''' )
_lowerCamelCase = XLMRobertaTokenizerFast.from_pretrained(__UpperCAmelCase , from_slow=__UpperCAmelCase )
_lowerCamelCase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_lowerCamelCase = DonutProcessor(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_lowerCamelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_lowerCamelCase = '''When is the coffee break?'''
_lowerCamelCase = task_prompt.replace('''{user_input}''' , __UpperCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_lowerCamelCase = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_lowerCamelCase = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
_lowerCamelCase = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_lowerCamelCase = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_lowerCamelCase = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
_lowerCamelCase = original_model.decoder.tokenizer(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
_lowerCamelCase = original_model.encoder.model.patch_embed(__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase = model.encoder.embeddings(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
# verify encoder hidden states
_lowerCamelCase = original_model.encoder(__UpperCAmelCase )
_lowerCamelCase = model.encoder(__UpperCAmelCase ).last_hidden_state
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
# verify decoder hidden states
_lowerCamelCase = original_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).logits
_lowerCamelCase = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
snake_case__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 638
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
A_ = 'swin'
A_ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , A_=2_24 , A_=4 , A_=3 , A_=96 , A_=[2, 2, 6, 2] , A_=[3, 6, 12, 24] , A_=7 , A_=4.0 , A_=True , A_=0.0 , A_=0.0 , A_=0.1 , A_="gelu" , A_=False , A_=0.02 , A_=1E-5 , A_=32 , A_=None , A_=None , **A_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(**A_ )
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = embed_dim
_lowerCamelCase = depths
_lowerCamelCase = len(A_ )
_lowerCamelCase = num_heads
_lowerCamelCase = window_size
_lowerCamelCase = mlp_ratio
_lowerCamelCase = qkv_bias
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = drop_path_rate
_lowerCamelCase = hidden_act
_lowerCamelCase = use_absolute_embeddings
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = initializer_range
_lowerCamelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase = int(embed_dim * 2 ** (len(A_ ) - 1) )
_lowerCamelCase = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(A_ ) + 1 )]
_lowerCamelCase , _lowerCamelCase = get_aligned_output_features_output_indices(
out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = version.parse('1.11' )
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase_ ( self ) -> float:
"""simple docstring"""
return 1E-4
| 638
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 638
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 638
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
| 1
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=50 , A_=0.02 , A_=True , A_=None , ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = initializer_range
_lowerCamelCase = use_labels
_lowerCamelCase = scope
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=A_ , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase = True
_lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ , **A_ , ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = BertGenerationEncoder(config=A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = model(A_ , attention_mask=A_ )
_lowerCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , ) -> str:
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = BertGenerationEncoder(config=A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
_lowerCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , ) -> Dict:
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = True
_lowerCamelCase = BertGenerationDecoder(config=A_ ).to(A_ ).eval()
# first forward pass
_lowerCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , )
_lowerCamelCase = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
_lowerCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['''hidden_states'''][0]
_lowerCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['''hidden_states'''][0]
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) )
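        # Note (added): this check encodes the KV-cache invariant: running the
        # model on the full sequence at once must match running it incrementally
        # with past_key_values, compared here on the last three positions of one
        # randomly chosen hidden dimension.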
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ , *A_ , ) -> int:
"""simple docstring"""
_lowerCamelCase = BertGenerationDecoder(A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
A_ = (BertGenerationDecoder,) if is_torch_available() else ()
A_ = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = BertGenerationEncoderTester(self )
_lowerCamelCase = ConfigTester(self , config_class=A_ , hidden_size=37 )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase = '''bert'''
self.model_tester.create_and_check_model(A_ , A_ , A_ , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# This regression test was failing with PyTorch < 1.3
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
_lowerCamelCase = None
self.model_tester.create_and_check_model_as_decoder(
A_ , A_ , A_ , A_ , A_ , A_ , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*A_ )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(A_ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_lowerCamelCase = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
_lowerCamelCase = model(A_ )[0]
_lowerCamelCase = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape , A_ )
_lowerCamelCase = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1E-4 ) )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_lowerCamelCase = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
_lowerCamelCase = model(A_ )[0]
_lowerCamelCase = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape , A_ )
_lowerCamelCase = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1E-4 ) )
| 638
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , A_ , A_ , A_ = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
_lowerCamelCase = timestep
if not torch.is_tensor(A_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(sample.device )
_lowerCamelCase = self.time_proj(A_ )
if self.config.use_timestep_embedding:
_lowerCamelCase = self.time_mlp(A_ )
else:
_lowerCamelCase = timestep_embed[..., None]
_lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
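        # Note (added): on this path the (batch, embed) embedding is expanded
        # to (batch, embed, length) and its batch dimension broadcast to the
        # sample's, so it matches the (batch, channels, length) layout the
        # blocks below expect.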
# 2. down
_lowerCamelCase = ()
for downsample_block in self.down_blocks:
_lowerCamelCase , _lowerCamelCase = downsample_block(hidden_states=A_ , temb=A_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase = self.mid_block(A_ , A_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase = down_block_res_samples[-1:]
_lowerCamelCase = down_block_res_samples[:-1]
_lowerCamelCase = upsample_block(A_ , res_hidden_states_tuple=A_ , temb=A_ )
# 5. post-process
if self.out_block:
_lowerCamelCase = self.out_block(A_ , A_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A_ )
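    # --- Minimal usage sketch (added; the public class name is an
    # assumption, not taken from this file) ---
    #   model = UNet1DModel(sample_size=65536)
    #   denoised = model(sample=torch.randn(1, 2, 65536), timestep=10).sample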
| 638
| 1
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ['vqvae']
def __init__( self , A_ , A_ , A_ , A_ , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ , mel=A_ , vqvae=A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , A_ ) else 10_00
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = 0 , A_ = None , A_ = None , A_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_lowerCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=A_ , device=self.device , )
_lowerCamelCase = noise
_lowerCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(A_ , A_ )
_lowerCamelCase = self.mel.audio_slice_to_image(A_ )
_lowerCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_lowerCamelCase = (input_image / 2_55) * 2 - 1
_lowerCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCamelCase = self.vqvae.encode(torch.unsqueeze(A_ , 0 ) ).latent_dist.sample(
generator=A_ )[0]
_lowerCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , self.scheduler.timesteps[start_step - 1] )
_lowerCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCamelCase = int(mask_start_secs * pixels_per_second )
_lowerCamelCase = int(mask_end_secs * pixels_per_second )
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , A_ ):
_lowerCamelCase = self.unet(A_ , A_ , A_ )['''sample''']
else:
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
if isinstance(self.scheduler , A_ ):
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , eta=A_ , generator=A_ , )['''prev_sample''']
else:
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , generator=A_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_lowerCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
_lowerCamelCase = 1 / self.vqvae.config.scaling_factor * images
_lowerCamelCase = self.vqvae.decode(A_ )['''sample''']
_lowerCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_lowerCamelCase = (images * 2_55).round().astype('''uint8''' )
_lowerCamelCase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(A_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
_lowerCamelCase = [self.mel.image_to_audio(A_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(A_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(A_ ) )
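    # --- Minimal usage sketch (added; the pipeline and checkpoint names are
    # assumptions, not taken from this file) ---
    #   pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
    #   output = pipe(batch_size=1)
    #   image, audio = output.images[0], output.audios[0]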
@torch.no_grad()
def UpperCamelCase_ ( self , A_ , A_ = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , A_ )
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCamelCase = (sample / 2_55) * 2 - 1
_lowerCamelCase = torch.Tensor(A_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_lowerCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCamelCase = self.scheduler.alphas_cumprod[t]
_lowerCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCamelCase = 1 - alpha_prod_t
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
_lowerCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
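    # Note (added): the loop above performs deterministic DDIM inversion.
    # Per step, with eps = unet(x, t) and a_t = alphas_cumprod[t]:
    #   x0_est = (x_{t_prev} - sqrt(1 - a_prev) * eps) / sqrt(a_prev)
    #   x_t    = sqrt(a_t) * x0_est + sqrt(1 - a_t) * eps
    # so an input image is mapped back towards Gaussian noise.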
@staticmethod
def UpperCamelCase_ ( A_ , A_ , A_ ) -> torch.Tensor:
"""simple docstring"""
_lowerCamelCase = acos(torch.dot(torch.flatten(A_ ) , torch.flatten(A_ ) ) / torch.norm(A_ ) / torch.norm(A_ ) )
return sin((1 - alpha) * theta ) * xa / sin(A_ ) + sin(alpha * theta ) * xa / sin(A_ )
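    # Note (added): this is spherical linear interpolation (slerp). With
    #   theta = arccos(<x0, x1> / (|x0| * |x1|)),
    # slerp(alpha, x0, x1) = sin((1 - alpha) * theta) / sin(theta) * x0
    #                      + sin(alpha * theta) / sin(theta) * x1,
    # which degenerates to ordinary linear interpolation as theta -> 0.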
| 638
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
snake_case__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
snake_case__ = [0, 25, 50]
snake_case__ = [25, 50, 75]
snake_case__ = fuzz.membership.trimf(X, abca)
snake_case__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
snake_case__ = np.ones(75)
snake_case__ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
snake_case__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
snake_case__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
snake_case__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
snake_case__ = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
snake_case__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, (µA(x) - µB(x))]
snake_case__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
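# Sketch (added): max-min composition of fuzzy relations R1 (m x n) and
# R2 (n x p) is T[i, j] = max_k min(R1[i, k], R2[k, j]); with NumPy this is
#   np.max(np.minimum(R1[:, :, None], R2[None, :, :]), axis=1)
# max-product composition replaces min with the ordinary product.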
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 638
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
A_ = 'resnet'
A_ = ['basic', 'bottleneck']
def __init__( self , A_=3 , A_=64 , A_=[2_56, 5_12, 10_24, 20_48] , A_=[3, 4, 6, 3] , A_="bottleneck" , A_="relu" , A_=False , A_=None , A_=None , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(**A_ )
if layer_type not in self.layer_types:
raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
_lowerCamelCase = num_channels
_lowerCamelCase = embedding_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = depths
_lowerCamelCase = layer_type
_lowerCamelCase = hidden_act
_lowerCamelCase = downsample_in_first_stage
_lowerCamelCase = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(A_ ) + 1 )]
_lowerCamelCase , _lowerCamelCase = get_aligned_output_features_output_indices(
out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = version.parse('1.11' )
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase_ ( self ) -> float:
"""simple docstring"""
return 1E-3
| 638
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda A_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 0
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def __call__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = Tracker(self.dest )(A_ ).parametrized
_lowerCamelCase = Tracker(self.src )(A_ ).parametrized
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(A_ )} operations while'
F' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
            print(F'Transferred from={src_m} to={dest_m}' )
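    # Note (added): the transfer is purely positional: both models are traced
    # on the same input, parametrized leaf modules are collected in execution
    # order, and state_dicts are copied pairwise, so it only works when source
    # and destination run the same parametrized ops in the same order.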
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
_lowerCamelCase = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
_lowerCamelCase = ResNetForImageClassification(__UpperCAmelCase ).eval()
_lowerCamelCase = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
_lowerCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
_lowerCamelCase = F'resnet{"-".join(name.split("resnet" ) )}'
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , )
        # ResNet uses the same preprocessing as ConvNeXt, so we can reuse its image processor
_lowerCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = 1000
_lowerCamelCase = (1, num_labels)
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = num_labels
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
_lowerCamelCase = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
        ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
snake_case__ = parser.parse_args()
snake_case__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 638
| 1
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 'segformer'
def __init__( self , A_=3 , A_=4 , A_=[2, 2, 2, 2] , A_=[8, 4, 2, 1] , A_=[32, 64, 1_60, 2_56] , A_=[7, 3, 3, 3] , A_=[4, 2, 2, 2] , A_=[1, 2, 5, 8] , A_=[4, 4, 4, 4] , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.1 , A_=0.02 , A_=0.1 , A_=1E-6 , A_=2_56 , A_=2_55 , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(**A_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , A_ , )
_lowerCamelCase = num_channels
_lowerCamelCase = num_encoder_blocks
_lowerCamelCase = depths
_lowerCamelCase = sr_ratios
_lowerCamelCase = hidden_sizes
_lowerCamelCase = patch_sizes
_lowerCamelCase = strides
_lowerCamelCase = mlp_ratios
_lowerCamelCase = num_attention_heads
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = classifier_dropout_prob
_lowerCamelCase = initializer_range
_lowerCamelCase = drop_path_rate
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = decoder_hidden_size
_lowerCamelCase = kwargs.get('''reshape_last_stage''' , A_ )
_lowerCamelCase = semantic_loss_ignore_index
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = version.parse('1.11' )
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase_ ( self ) -> float:
"""simple docstring"""
return 1E-4
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return 12
| 638
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_lowerCamelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , A_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
# load decoder from hub
_lowerCamelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def UpperCamelCase_ ( self , **A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(A_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> Optional[Any]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> int:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
    # make sure that additional decoder kwargs are applied when loading
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = feature_extractor(A_ , return_tensors='''np''' )
_lowerCamelCase = processor(A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = '''This is a test string'''
_lowerCamelCase = processor(text=A_ )
_lowerCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self , A_=(2, 10, 16) , A_=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(A_ )
return np.random.rand(*A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase = processor.decode(A_ )
_lowerCamelCase = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
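        # Note (added): pyctcdecode's decode_beams returns, per beam, a tuple
        # (text, last_lm_state, text_frames, logit_score, lm_score), which is
        # why indices 0, -2 and -1 are compared above.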
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCamelCase = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCamelCase = processor.batch_decode(A_ , A_ )
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as p:
_lowerCamelCase = decoder.decode_beams_batch(A_ , A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 15
_lowerCamelCase = -20.0
_lowerCamelCase = -4.0
_lowerCamelCase = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 2.0
_lowerCamelCase = 5.0
_lowerCamelCase = -20.0
_lowerCamelCase = True
_lowerCamelCase = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the locally cached files are the same
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = processor_wavaveca(A_ , return_tensors='''np''' )
_lowerCamelCase = processor_auto(A_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor_wavaveca.batch_decode(A_ )
_lowerCamelCase = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def UpperCamelCase_ ( A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()[0]
_lowerCamelCase = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
import torch
_lowerCamelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A_ )
_lowerCamelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_lowerCamelCase = iter(A_ )
_lowerCamelCase = next(A_ )
_lowerCamelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_lowerCamelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCamelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_lowerCamelCase = model(A_ ).logits.cpu().numpy()
_lowerCamelCase = processor.decode(logits[0] , output_word_offsets=A_ )
_lowerCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCamelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
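        # Note (added): each logit frame covers inputs_to_logits_ratio raw
        # audio samples, so multiplying a frame offset by
        # ratio / sampling_rate converts it to seconds.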
_lowerCamelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , A_ )
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , output.text )
# output times
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''start_time''' ) )
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''end_time''' ) )
# fmt: off
_lowerCamelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCamelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
| 638
| 1
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case__ = logging.get_logger(__name__)
snake_case__ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
snake_case__ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
snake_case__ = {
'abeja/gpt-neox-japanese-2.7b': 2048,
}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = collections.OrderedDict()
_lowerCamelCase = collections.OrderedDict()
_lowerCamelCase = collections.OrderedDict()
with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
_lowerCamelCase = f.readlines()
_lowerCamelCase = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(__UpperCAmelCase ):
_lowerCamelCase = b
_lowerCamelCase = idx
for wd in b:
_lowerCamelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
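# Note (added, a reading of the parsing above): each line of vocab.txt lists
# the surface forms that share a single id, comma-separated, so `vocab` maps
# several strings to one id while `ids_to_tokens` keeps the full per-id list.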
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['input_ids', 'attention_mask']
def __init__( self , A_ , A_ , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|startoftext|>" , A_="<|endoftext|>" , A_=False , **A_ , ) -> Dict:
"""simple docstring"""
super().__init__(
unk_token=A_ , pad_token=A_ , bos_token=A_ , eos_token=A_ , do_clean_text=A_ , **A_ , )
if not os.path.isfile(A_ ):
raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(A_ ):
raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ''' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
_lowerCamelCase = do_clean_text
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = load_vocab_and_emoji(A_ , A_ )
_lowerCamelCase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
        # self.vocab also holds Japanese character-variant forms (character fluctuation), so it is larger than raw_vocab; report the raw size
return len(self.raw_vocab )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCamelCase_ ( self , A_ ) -> Tuple:
"""simple docstring"""
return self.subword_tokenizer.tokenize(A_ , clean=self.do_clean_text )
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
return self.vocab.get(A_ , self.vocab.get(self.unk_token ) )
def UpperCamelCase_ ( self , A_ ) -> Dict:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(A_ )
def UpperCamelCase_ ( self , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = ''''''.join(A_ ).strip()
return out_string
def UpperCamelCase_ ( self , A_ ) -> List[int]:
"""simple docstring"""
_lowerCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(A_ , add_special_tokens=A_ ) + [self.eos_token_id] )
if len(A_ ) > self.model_max_length:
_lowerCamelCase = input_ids[-self.model_max_length :]
return input_ids
    def UpperCamelCase_ ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
        else:
            vocab_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
            )
            emoji_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
            )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(''','''.join(token ) + '''\n''' )
                index += 1
        with open(emoji_file , '''w''' , encoding='''utf-8''' ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
'''simple docstring'''
    def __init__( self , vocab , ids_to_tokens , emoji ) -> None:
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(r'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
        self.content_repatter2 = re.compile(r'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
        self.content_repatter3 = re.compile(r'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
        self.content_repatter4 = re.compile(
            r'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter5 = re.compile(
            r'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter6 = re.compile(
            r'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
        keisen = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
        blocks = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
        self.content_transa = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.ids_to_tokens )
    def clean_text( self , content ) -> str:
        """simple docstring"""
        content = self.content_repatter1.sub('''<URL>''' , content )
        content = self.content_repatter2.sub('''<EMAIL>''' , content )
        content = self.content_repatter3.sub('''<TEL>''' , content )
        content = self.content_repatter4.sub('''<DATE>''' , content )
        content = self.content_repatter5.sub('''<DATE>''' , content )
        content = self.content_repatter6.sub('''<PRICE>''' , content )
        content = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
return content
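    # Illustrative effect on a hypothetical input: "連絡は foo@bar.example まで" becomes
    # "連絡は <EMAIL> まで"; URLs, phone numbers, dates and prices are normalized to
    # <URL>, <TEL>, <DATE> and <PRICE> in the same way, and runs of box-drawing
    # characters collapse into a single <BLOCK>.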
    def tokenize( self , text , clean=False ) -> list:
        """simple docstring"""
        text = text.replace(''' ''' , '''<SP>''' )
        text = text.replace('''　''' , '''<SP>''' )
        text = text.replace('''\r\n''' , '''<BR>''' )
        text = text.replace('''\n''' , '''<BR>''' )
        text = text.replace('''\r''' , '''<BR>''' )
        text = text.replace('''\t''' , '''<TAB>''' )
        text = text.replace('''—''' , '''ー''' )
        text = text.replace('''−''' , '''ー''' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False

        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xe28080 and c <= 0xe2b07f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('''<KIGOU>''' )
                elif checkuae(wd ):
                    result.append('''<U2000U2BFF>''' )
                else:
                    for i in wd.encode('''utf-8''' ):
                        result.append('''<|byte%d|>''' % i )
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n" ) -> str:
        """simple docstring"""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('''utf-8''' , errors='''replace''' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['''emoji_inv'''][word] )
            elif word == "<SP>":
                words.append(''' ''' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('''\t''' )
            elif word == "<BLOCK>":
                words.append('''▀''' )
            elif word == "<KIGOU>":
                words.append('''ǀ''' )
            elif word == "<U2000U2BFF>":
                words.append('''‖''' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('''utf-8''' , errors='''replace''' ) )
        text = ''''''.join(words )
        return text
def is_sum_subset( arr , required_sum ) -> bool:
    '''
    Checks whether some subset of arr sums exactly to required_sum, using bottom-up
    dynamic programming over a (len(arr) + 1) x (required_sum + 1) boolean table.

    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    '''
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['input_ids', 'attention_mask']
A_ = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ) -> None:
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
def UpperCamelCase_ ( self , *A_ , **A_ ) -> BatchEncoding:
"""simple docstring"""
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A_ , **A_ )
def UpperCamelCase_ ( self , *A_ , **A_ ) -> BatchEncoding:
"""simple docstring"""
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A_ , **A_ )
    def UpperCamelCase_ ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def UpperCamelCase_ ( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def UpperCamelCase_ ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
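# Minimal usage sketch (an illustration; assumes the public "roberta-base" checkpoint and
# the upstream class name RobertaTokenizerFast):
#
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
#   enc = tok(["Hello", "world"], is_split_into_words=True)
#
# add_prefix_space=True is what the asserts in _batch_encode_plus/_encode_plus above
# require for pretokenized inputs.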
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs( gen_kwargs ) -> int:
    '''simple docstring'''
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                '''Sharding is ambiguous for this dataset: '''
                + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
                + '''\n'''.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
                + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
                + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards( num_shards , max_num_jobs ) -> List[range]:
    '''simple docstring'''
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
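# Worked example (illustrative): _distribute_shards(num_shards=5, max_num_jobs=2) returns
# [range(0, 3), range(3, 5)], so earlier groups absorb the remainder, while
# _distribute_shards(num_shards=2, max_num_jobs=10) returns [range(0, 1), range(1, 2)]
# because empty trailing groups are dropped by the early break.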
def _split_gen_kwargs( gen_kwargs , max_num_jobs ) -> List[dict]:
    '''simple docstring'''
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
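# Example (illustrative): _split_gen_kwargs({"files": ["a", "b", "c"], "lang": "en"}, max_num_jobs=2)
# yields [{"files": ["a", "b"], "lang": "en"}, {"files": ["c"], "lang": "en"}]; only list
# values are sharded, everything else is copied into each group.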
def _merge_gen_kwargs( gen_kwargs_list ) -> dict:
    '''simple docstring'''
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs( rng , gen_kwargs ) -> dict:
    '''simple docstring'''
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
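# Once uploaded, a quick sanity check (hypothetical usage) could look like:
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
#   outputs = model.generate(**tokenizer(["hello"], return_tensors="pt"))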
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ) -> None:
        """simple docstring"""
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ) -> tuple:
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
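    # Worked example (illustrative): with size={"shortest_edge": 18}, a 400x300 image
    # (w > h) maps to expected_height = 18 and expected_width = int(18 * 400 / 300) = 24,
    # preserving the aspect ratio.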
@require_torch
@require_vision
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = YolosImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        """simple docstring"""
        self.image_processor_tester = YolosImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> Dict:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , '''image_mean''' ) )
self.assertTrue(hasattr(A_ , '''image_std''' ) )
self.assertTrue(hasattr(A_ , '''do_normalize''' ) )
self.assertTrue(hasattr(A_ , '''do_resize''' ) )
self.assertTrue(hasattr(A_ , '''size''' ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
_lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# Initialize image_processings
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase = self.image_processing_class(do_resize=A_ , do_normalize=A_ , do_rescale=A_ )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_lowerCamelCase = image_processing_a.pad(A_ , return_tensors='''pt''' )
_lowerCamelCase = image_processing_a(A_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_lowerCamelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
# prepare image, target and masks_path
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase = YolosImageProcessor(format='''coco_panoptic''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify masks
_lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A_ )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation( cells ) -> list[list[int]]:
    '''simple docstring'''
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
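# Example (illustrative): one step of new_generation flips the blinker from a vertical
# bar to a horizontal one:
#   new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]]) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]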
def generate_images( cells , frames ) -> list[Image.Image]:
    '''simple docstring'''
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
snake_case__ = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
import argparse
import json
from tqdm import tqdm
def main() -> None:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__UpperCAmelCase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=__UpperCAmelCase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=__UpperCAmelCase , help='''where to store parsed gold_data_path file''' , )
    args = parser.parse_args()
    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(contexts ) + '''\n''' )
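    # Illustrative record shape (an assumption based on the fields accessed above):
    #   {"question": "who sings ...", "positive_ctxs": [{"title": "Some Article", ...}], ...}
    # Each record contributes one question line to evaluation_set and one tab-joined
    # line of positive-context titles to gold_data_path.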
if __name__ == "__main__":
main()
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_lowerCamelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , A_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
# load decoder from hub
_lowerCamelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def UpperCamelCase_ ( self , **A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(A_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> Optional[Any]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> int:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # make sure that an error is thrown when the decoder alphabet doesn't match
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = feature_extractor(A_ , return_tensors='''np''' )
_lowerCamelCase = processor(A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = '''This is a test string'''
_lowerCamelCase = processor(text=A_ )
_lowerCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self , A_=(2, 10, 16) , A_=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(A_ )
return np.random.rand(*A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase = processor.decode(A_ )
_lowerCamelCase = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCamelCase = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCamelCase = processor.batch_decode(A_ , A_ )
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as p:
_lowerCamelCase = decoder.decode_beams_batch(A_ , A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 15
_lowerCamelCase = -20.0
_lowerCamelCase = -4.0
_lowerCamelCase = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 2.0
_lowerCamelCase = 5.0
_lowerCamelCase = -20.0
_lowerCamelCase = True
_lowerCamelCase = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = processor_wavaveca(A_ , return_tensors='''np''' )
_lowerCamelCase = processor_auto(A_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor_wavaveca.batch_decode(A_ )
_lowerCamelCase = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
    @staticmethod
    def get_from_offsets( offsets , key ) -> list:
        """simple docstring"""
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()[0]
_lowerCamelCase = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
import torch
        ds = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=True )
        ds = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
        ds_iter = iter(ds )
        sample = next(ds_iter )
        processor = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        model = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
        with torch.no_grad():
            logits = model(input_values ).logits.cpu().numpy()
        output = processor.decode(logits[0] , output_word_offsets=True )
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_offsets = [
            {
                '''start_time''': d['''start_offset'''] * time_offset,
                '''end_time''': d['''end_offset'''] * time_offset,
                '''word''': d['''word'''],
            }
            for d in output['''word_offsets''']
        ]
        EXPECTED_OUTPUT = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
        # output words
        self.assertEqual(''' '''.join(self.get_from_offsets(word_offsets , '''word''' ) ) , EXPECTED_OUTPUT )
        self.assertEqual(''' '''.join(self.get_from_offsets(word_offsets , '''word''' ) ) , output.text )
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_offsets , '''start_time''' ) )
        end_times = torch.tensor(self.get_from_offsets(word_offsets , '''end_time''' ) )
        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(start_times , expected_start_tensor , atol=0.01 ) )
        self.assertTrue(torch.allclose(end_times , expected_end_tensor , atol=0.01 ) )
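        # Worked example (hedged, assuming the standard base wav2vec2 architecture):
        # inputs_to_logits_ratio is 320 and the sampling rate is 16000, so
        # time_offset = 320 / 16000 = 0.02 s, i.e. each logit frame spans 20 ms of audio,
        # which is the granularity of the start/end times asserted above.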
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
'''simple docstring'''
    def __init__( self , device = "cpu" , clip_model = "openai/clip-vit-large-patch14" ) -> None:
        """simple docstring"""
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        self.resize = torchvision.transforms.Resize(2_24 )
        self.center_crop = torchvision.transforms.CenterCrop(2_24 )
    def preprocess_img( self , images ) -> torch.Tensor:
        """simple docstring"""
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images

    def __call__( self , text=None , images=None , **kwargs ) -> dict:
        """simple docstring"""
        encoding = self.tokenizer(text=text , **kwargs )
        encoding['''pixel_values'''] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
    def _get_clip_similarity( self , prompts , image , weights=None ) -> torch.Tensor:
        """simple docstring"""
        clip_inputs = self.clip_preprocessor(text=prompts , images=image , return_tensors='''pt''' , padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss( self , pos_prompts , neg_prompts , image ) -> torch.Tensor:
        """simple docstring"""
        pos_logits = self._get_clip_similarity(pos_prompts['''prompts'''] , image , weights=(1 / pos_prompts['''weights''']) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['''prompts'''] , image , weights=neg_prompts['''weights'''] )
        else:
            neg_logits = torch.tensor([1] , device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
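    # Note on the loss above (descriptive): minimizing -log(pos_logits) + log(neg_logits)
    # pushes the summed CLIP similarity to the positive prompts up and to the negative
    # prompts down; with no negative prompts the second term is log(1) = 0.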
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
wandb.log('''Original Image''' , wandb.Image(A_ ) )
    def process_prompts( self , prompts ) -> dict:
        """simple docstring"""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split('''|''' )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt , weight = prompt.split(''':''' )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case__ = logging.getLogger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def UpperCamelCase_ ( self , A_ , A_ , A_=None , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.layer[current_layer](A_ , A_ , head_mask[current_layer] )
_lowerCamelCase = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , __lowercase , )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , A_ ) -> int:
"""simple docstring"""
super().__init__(A_ )
_lowerCamelCase = BertEncoderWithPabee(A_ )
self.init_weights()
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 0
def UpperCamelCase_ ( self , A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = threshold
def UpperCamelCase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = patience
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = 0
_lowerCamelCase = 0
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = self.inference_layers_num / self.inference_instances_num
_lowerCamelCase = (
F'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
F' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
)
print(A_ )
@add_start_docstrings_to_model_forward(A_ )
def UpperCamelCase_ ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , ) -> Tuple:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_lowerCamelCase = input_ids.size()
elif inputs_embeds is not None:
_lowerCamelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_lowerCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCamelCase = torch.ones(A_ , device=A_ )
if token_type_ids is None:
_lowerCamelCase = torch.zeros(A_ , dtype=torch.long , device=A_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCamelCase = self.get_extended_attention_mask(A_ , A_ , A_ )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = encoder_hidden_states.size()
_lowerCamelCase = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_lowerCamelCase = torch.ones(A_ , device=A_ )
_lowerCamelCase = self.invert_attention_mask(A_ )
else:
_lowerCamelCase = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCamelCase = self.get_head_mask(A_ , self.config.num_hidden_layers )
_lowerCamelCase = self.embeddings(
input_ids=A_ , position_ids=A_ , token_type_ids=A_ , inputs_embeds=A_ )
_lowerCamelCase = embedding_output
if self.training:
_lowerCamelCase = []
for i in range(self.config.num_hidden_layers ):
_lowerCamelCase = self.encoder.adaptive_forward(
A_ , current_layer=A_ , attention_mask=A_ , head_mask=A_ )
_lowerCamelCase = self.pooler(A_ )
_lowerCamelCase = output_layers[i](output_dropout(A_ ) )
res.append(A_ )
elif self.patience == 0: # Use all layers for inference
_lowerCamelCase = self.encoder(
A_ , attention_mask=A_ , head_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
_lowerCamelCase = self.pooler(encoder_outputs[0] )
_lowerCamelCase = [output_layers[self.config.num_hidden_layers - 1](A_ )]
else:
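# Patience-based early exit: stop as soon as self.patience consecutive layers agree on the prediction.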
_lowerCamelCase = 0
_lowerCamelCase = None
_lowerCamelCase = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_lowerCamelCase = self.encoder.adaptive_forward(
A_ , current_layer=A_ , attention_mask=A_ , head_mask=A_ )
_lowerCamelCase = self.pooler(A_ )
_lowerCamelCase = output_layers[i](A_ )
if regression:
_lowerCamelCase = logits.detach()
if patient_result is not None:
_lowerCamelCase = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_lowerCamelCase = 0
else:
_lowerCamelCase = logits.detach().argmax(dim=1 )
if patient_result is not None:
_lowerCamelCase = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(A_ ) ):
patient_counter += 1
else:
_lowerCamelCase = 0
_lowerCamelCase = logits
if patient_counter == self.patience:
break
_lowerCamelCase = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , __lowercase , )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(A_ )
_lowerCamelCase = config.num_labels
_lowerCamelCase = BertModelWithPabee(A_ )
_lowerCamelCase = nn.Dropout(config.hidden_dropout_prob )
_lowerCamelCase = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(A_ )
def UpperCamelCase_ ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , ) -> str:
"""simple docstring"""
_lowerCamelCase = self.bert(
input_ids=A_ , attention_mask=A_ , token_type_ids=A_ , position_ids=A_ , head_mask=A_ , inputs_embeds=A_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_lowerCamelCase = (logits[-1],)
if labels is not None:
_lowerCamelCase = None
_lowerCamelCase = 0
for ix, logits_item in enumerate(A_ ):
if self.num_labels == 1:
# We are doing regression
_lowerCamelCase = MSELoss()
_lowerCamelCase = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_lowerCamelCase = CrossEntropyLoss()
_lowerCamelCase = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_lowerCamelCase = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_lowerCamelCase = (total_loss / total_weights,) + outputs
return outputs
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
snake_case__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
snake_case__ = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def __magic_name__( ) -> Dict:
'''simple docstring'''
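# Build a reversible mapping from bytes to printable unicode characters so BPE can handle arbitrary UTF-8 input.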
_lowerCamelCase = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
_lowerCamelCase = bs[:]
_lowerCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCAmelCase )
cs.append(2**8 + n )
n += 1
_lowerCamelCase = [chr(__UpperCAmelCase ) for n in cs]
return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase = set()
_lowerCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCamelCase = char
return pairs
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['input_ids', 'attention_mask']
def __init__( self , A_ , A_ , A_="replace" , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=False , **A_ , ) -> str:
"""simple docstring"""
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='''utf-8''' ) as vocab_handle:
_lowerCamelCase = json.load(A_ )
_lowerCamelCase = {v: k for k, v in self.encoder.items()}
_lowerCamelCase = errors # how to handle errors in decoding
_lowerCamelCase = bytes_to_unicode()
_lowerCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='''utf-8''' ) as merges_handle:
_lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1]
_lowerCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {}
_lowerCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCamelCase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_lowerCamelCase = tuple(A_ )
_lowerCamelCase = get_pairs(A_ )
if not pairs:
return token
while True:
_lowerCamelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCamelCase , _lowerCamelCase = bigram
_lowerCamelCase = []
_lowerCamelCase = 0
while i < len(A_ ):
try:
_lowerCamelCase = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCamelCase = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCamelCase = tuple(A_ )
_lowerCamelCase = new_word
if len(A_ ) == 1:
break
else:
_lowerCamelCase = get_pairs(A_ )
_lowerCamelCase = ''' '''.join(A_ )
_lowerCamelCase = word
return word
def UpperCamelCase_ ( self , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = []
for token in re.findall(self.pat , A_ ):
_lowerCamelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(''' ''' ) )
return bpe_tokens
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
return self.decoder.get(A_ )
def UpperCamelCase_ ( self , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = ''''''.join(A_ )
_lowerCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(A_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '''\n''' )
_lowerCamelCase = 0
with open(A_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
_lowerCamelCase = token_index
writer.write(''' '''.join(A_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
_lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self , A_ , A_=False , **A_ ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
_lowerCamelCase = ''' ''' + text
return (text, kwargs)
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
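# Within-class covariance: sum the covariance of each class's centered samples, normalized by the total sample count.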
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.mean(1 )
# Centralize the data of class i
_lowerCamelCase = data - column_reshape(__UpperCAmelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(__UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
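# Between-class covariance: scatter of the class means around the global mean, weighted by class size.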
_lowerCamelCase = features.mean(1 )
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.shape[1]
_lowerCamelCase = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
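# PCA: center the data, diagonalize its covariance, and project onto the top `dimensions` eigenvectors.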
if features.any():
_lowerCamelCase = features.mean(1 )
# Center the dataset
_lowerCamelCase = features - np.reshape(__UpperCAmelCase , (data_mean.size, 1) )
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T ) / features.shape[1]
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(__UpperCAmelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCamelCase = np.dot(filtered_eigenvectors.T , __UpperCAmelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
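# LDA: solve the generalized eigenproblem of between- vs. within-class covariance and project onto the leading directions.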
assert classes > dimensions
# Check if features have been already loaded
if features.any():
_lowerCamelCase , _lowerCamelCase = eigh(
covariance_between_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , covariance_within_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , )
_lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = np.linalg.svd(__UpperCAmelCase )
_lowerCamelCase = svd_matrix[:, 0:dimensions]
_lowerCamelCase = np.dot(filtered_svd_matrix.T , __UpperCAmelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCamelCase = np.array([0, 0, 0, 1, 1] )
_lowerCamelCase = 2
_lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = linear_discriminant_analysis(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if isinstance(__UpperCAmelCase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCamelCase = 2
_lowerCamelCase = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = principal_component_analysis(__UpperCAmelCase , __UpperCAmelCase )
if not np.allclose(__UpperCAmelCase , __UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ['vqvae']
def __init__( self , A_ , A_ , A_ , A_ , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ , mel=A_ , vqvae=A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , A_ ) else 10_00
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = 0 , A_ = None , A_ = None , A_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_lowerCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=A_ , device=self.device , )
_lowerCamelCase = noise
_lowerCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(A_ , A_ )
_lowerCamelCase = self.mel.audio_slice_to_image(A_ )
_lowerCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_lowerCamelCase = (input_image / 2_55) * 2 - 1
_lowerCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCamelCase = self.vqvae.encode(torch.unsqueeze(A_ , 0 ) ).latent_dist.sample(
generator=A_ )[0]
_lowerCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , self.scheduler.timesteps[start_step - 1] )
_lowerCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCamelCase = int(mask_start_secs * pixels_per_second )
_lowerCamelCase = int(mask_end_secs * pixels_per_second )
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , A_ ):
_lowerCamelCase = self.unet(A_ , A_ , A_ )['''sample''']
else:
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
if isinstance(self.scheduler , A_ ):
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , eta=A_ , generator=A_ , )['''prev_sample''']
else:
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , generator=A_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_lowerCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
_lowerCamelCase = 1 / self.vqvae.config.scaling_factor * images
_lowerCamelCase = self.vqvae.decode(A_ )['''sample''']
_lowerCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_lowerCamelCase = (images * 2_55).round().astype('''uint8''' )
_lowerCamelCase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(A_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
_lowerCamelCase = [self.mel.image_to_audio(A_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(A_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(A_ ) )
@torch.no_grad()
def UpperCamelCase_ ( self , A_ , A_ = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , A_ )
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCamelCase = (sample / 2_55) * 2 - 1
_lowerCamelCase = torch.Tensor(A_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_lowerCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCamelCase = self.scheduler.alphas_cumprod[t]
_lowerCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCamelCase = 1 - alpha_prod_t
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
_lowerCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCamelCase_ ( A_ , A_ , A_ ) -> torch.Tensor:
"""simple docstring"""
_lowerCamelCase = acos(torch.dot(torch.flatten(A_ ) , torch.flatten(A_ ) ) / torch.norm(A_ ) / torch.norm(A_ ) )
return sin((1 - alpha) * theta ) * xa / sin(A_ ) + sin(alpha * theta ) * xa / sin(A_ )
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
A_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = TextaTextGenerationPipeline(model=A_ , tokenizer=A_ )
return generator, ["Something to write", "Something else"]
def UpperCamelCase_ ( self , A_ , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = generator('''Something there''' )
self.assertEqual(A_ , [{'''generated_text''': ANY(A_ )}] )
# These are encoder-decoder models, they don't just append to the incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
_lowerCamelCase = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'''generated_text''': ANY(A_ )}, {'''generated_text''': ANY(A_ )}],
[{'''generated_text''': ANY(A_ )}, {'''generated_text''': ANY(A_ )}],
] , )
_lowerCamelCase = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=A_ )
self.assertEqual(
A_ , [
[{'''generated_text''': ANY(A_ )}, {'''generated_text''': ANY(A_ )}],
[{'''generated_text''': ANY(A_ )}, {'''generated_text''': ANY(A_ )}],
] , )
with self.assertRaises(A_ ):
generator(4 )
@require_torch
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
_lowerCamelCase = generator('''Something there''' , do_sample=A_ )
self.assertEqual(A_ , [{'''generated_text''': ''''''}] )
_lowerCamelCase = 3
_lowerCamelCase = generator(
'''Something there''' , num_return_sequences=A_ , num_beams=A_ , )
_lowerCamelCase = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(A_ , A_ )
_lowerCamelCase = generator('''This is a test''' , do_sample=A_ , num_return_sequences=2 , return_tensors=A_ )
self.assertEqual(
A_ , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
_lowerCamelCase = generator.model.config.eos_token_id
_lowerCamelCase = '''<pad>'''
_lowerCamelCase = generator(
['''This is a test''', '''This is a second test'''] , do_sample=A_ , num_return_sequences=2 , batch_size=2 , return_tensors=A_ , )
self.assertEqual(
A_ , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
_lowerCamelCase = generator('''Something there''' , do_sample=A_ )
self.assertEqual(A_ , [{'''generated_text''': ''''''}] )
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowercase ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A_ )
assert mmeta["long_pair"] == "heb-eng"
from __future__ import annotations
def __magic_name__( __UpperCAmelCase ) -> bool:
'''simple docstring'''
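# Side lengths can form a polygon iff the longest side is strictly shorter than the sum of the others.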
if len(__UpperCAmelCase ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
_lowerCamelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]:
'''simple docstring'''
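# Map original DINO/timm parameter names onto the HuggingFace ViT naming scheme.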
_lowerCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> str:
'''simple docstring'''
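# timm stores query/key/value as a single fused qkv matrix; slice it into separate Q, K and V projections.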
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase = ''''''
else:
_lowerCamelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = dct.pop(__UpperCAmelCase )
_lowerCamelCase = val
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True ) -> str:
'''simple docstring'''
_lowerCamelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCamelCase = 8
# set labels if required
if not base_model:
_lowerCamelCase = 1000
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCamelCase = 384
_lowerCamelCase = 1536
_lowerCamelCase = 12
_lowerCamelCase = 6
# load original model from torch hub
_lowerCamelCase = torch.hub.load('''facebookresearch/dino:main''' , __UpperCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__UpperCAmelCase )
_lowerCamelCase = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# load HuggingFace model
if base_model:
_lowerCamelCase = ViTModel(__UpperCAmelCase , add_pooling_layer=__UpperCAmelCase ).eval()
else:
_lowerCamelCase = ViTForImageClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCamelCase = ViTImageProcessor()
_lowerCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowerCamelCase = encoding['''pixel_values''']
_lowerCamelCase = model(__UpperCAmelCase )
if base_model:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCAmelCase , outputs.logits , atol=1E-3 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
snake_case__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
snake_case__ = 0B101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
snake_case__ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class UpperCamelCase :
'''simple docstring'''
def __init__( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = WATERMARK_BITS
_lowerCamelCase = WatermarkEncoder()
self.encoder.set_watermark('''bits''' , self.watermark )
def UpperCamelCase_ ( self , A_ ) -> str:
"""simple docstring"""
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
_lowerCamelCase = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_lowerCamelCase = [self.encoder.encode(A_ , '''dwtDct''' ) for image in images]
_lowerCamelCase = torch.from_numpy(np.array(A_ ) ).permute(0 , 3 , 1 , 2 )
_lowerCamelCase = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0 )
return images
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
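# Copy every fairseq tensor into the matching HF UniSpeech parameter, logging any weights that could not be mapped.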
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load_from_json(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 42
_lowerCamelCase = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
_lowerCamelCase = UniSpeechForCTC(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCamelCase = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning-rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a warmup phase followed by a polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
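# Minimal usage sketch for create_optimizer (step counts and hyper-parameters
# below are illustrative, and `model` is assumed to be some tf.keras.Model):
#
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=3e-5,
#       num_train_steps=10_000,
#       num_warmup_steps=1_000,
#       weight_decay_rate=0.01,
#   )
#   model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")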
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied directly to the variables."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to the parameter named `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients across replicas; call with per-batch gradients, read
    `.gradients` when ready to apply, then `.reset()`."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
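# Sketch of a gradient-accumulation loop using the pieces above (assumes `model`,
# `loss_fn`, `batches`, and `optimizer` exist; none of them are defined here):
#
#   accumulator = GradientAccumulator()
#   for batch in batches:
#       with tf.GradientTape() as tape:
#           loss = loss_fn(model(batch))
#       accumulator(tape.gradient(loss, model.trainable_variables))
#       if accumulator.step % 4 == 0:  # apply once every 4 micro-batches
#           optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#           accumulator.reset()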
| 638
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
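# Migration sketch: rather than instantiating the deprecated class above, load the
# replacement processor directly (`Intel/dpt-large` is a public DPT checkpoint):
#
#   from transformers import DPTImageProcessor
#   image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")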
| 638
| 1
|
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """
    Classic subset-sum dynamic programming: subset[i][j] is True when some subset
    of the first i elements sums to j.

    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
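    # Quick demonstration beyond the doctests: 9 = 4 + 5 is reachable from
    # [3, 34, 4, 12, 5, 2], while 30 is not.
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False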
| 638
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    status = json.loads(output.stdout.decode("utf-8"))

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
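    # Example invocation (runner names and token are placeholders):
    #
    #   python this_script.py \
    #       --target_runners runner-a,runner-b \
    #       --token <token-with-actions:read-permission>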
| 638
| 1
|
def nand_gate(input_1: int, input_2: int) -> int:
    """NAND: output 0 only when both inputs are 1, otherwise 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 638
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 638
| 1
|
def solution() -> int:
    """Project Euler problem 19: count the Sundays that fell on the first of the
    month during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    1 Jan 1901 was a Tuesday, so the first Sunday of 1901 is Jan 6 (day = 6);
    we then step a week at a time, rolling the day count over month boundaries.
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
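    # Sanity note: the published answer to Project Euler problem 19 is 171,
    # which is the value solution() computes here.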
| 638
|
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with the Russian-peasant (binary) method: O(log b) additions."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a by b modulo `modulus`, keeping intermediate values reduced."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
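if __name__ == "__main__":
    # Spot-check the helpers against Python's built-in operators.
    assert binary_multiply(13, 21) == 13 * 21
    assert binary_mod_multiply(13, 21, 7) == (13 * 21) % 7
    print(binary_multiply(13, 21), binary_mod_multiply(13, 21, 7))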
| 638
| 1
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    # True while `line` still belongs to the current object: it keeps the indent,
    # is (nearly) empty, or is the dedented closing parenthesis of a signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    """Return the indentation of the first non-empty line in `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Format `code` with black, temporarily wrapping indented snippets in a dummy class."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the file content if `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies.
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line.
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)

    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
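    # The comment format this script enforces looks like the following sketch
    # (an illustrative line, not taken from any particular diffusers file):
    #
    #   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
    #
    # Run `python utils/check_copies.py` to verify copies, or pass
    # `--fix_and_overwrite` to rewrite drifted copies in place.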
| 638
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = model.config
_lowerCamelCase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
_lowerCamelCase = MBartConfig(
is_decoder=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , add_cross_attention=__UpperCAmelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__UpperCAmelCase , add_final_layer_norm=__UpperCAmelCase , )
return encoder_config, decoder_config
def __magic_name__( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if "encoder.model" in name:
_lowerCamelCase = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
_lowerCamelCase = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_lowerCamelCase = '''encoder.''' + name
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_lowerCamelCase = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_lowerCamelCase = '''encoder.layernorm.bias'''
return name
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(__UpperCAmelCase )
if "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[3] )
_lowerCamelCase = int(key_split[5] )
_lowerCamelCase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[dim : dim * 2, :]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[:dim]
_lowerCamelCase = val[dim : dim * 2]
_lowerCamelCase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_lowerCamelCase = val
return orig_state_dict
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase = DonutModel.from_pretrained(__UpperCAmelCase ).eval()
# load HuggingFace model
_lowerCamelCase , _lowerCamelCase = get_configs(__UpperCAmelCase )
_lowerCamelCase = DonutSwinModel(__UpperCAmelCase )
_lowerCamelCase = MBartForCausalLM(__UpperCAmelCase )
_lowerCamelCase = VisionEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
model.eval()
_lowerCamelCase = original_model.state_dict()
_lowerCamelCase = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# verify results on scanned document
_lowerCamelCase = load_dataset('''hf-internal-testing/example-documents''' )
_lowerCamelCase = dataset['''test'''][0]['''image'''].convert('''RGB''' )
_lowerCamelCase = XLMRobertaTokenizerFast.from_pretrained(__UpperCAmelCase , from_slow=__UpperCAmelCase )
_lowerCamelCase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_lowerCamelCase = DonutProcessor(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_lowerCamelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_lowerCamelCase = '''When is the coffee break?'''
_lowerCamelCase = task_prompt.replace('''{user_input}''' , __UpperCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_lowerCamelCase = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_lowerCamelCase = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        _lowerCamelCase = '''<s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_lowerCamelCase = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_lowerCamelCase = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
_lowerCamelCase = original_model.decoder.tokenizer(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
_lowerCamelCase = original_model.encoder.model.patch_embed(__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase = model.encoder.embeddings(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
# verify encoder hidden states
_lowerCamelCase = original_model.encoder(__UpperCAmelCase )
_lowerCamelCase = model.encoder(__UpperCAmelCase ).last_hidden_state
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
# verify decoder hidden states
_lowerCamelCase = original_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).logits
_lowerCamelCase = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
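    # Example invocation (dump folder is a placeholder, assuming this file is saved
    # as convert_donut_to_pytorch.py):
    #
    #   python convert_donut_to_pytorch.py \
    #       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
    #       --pytorch_dump_folder_path ./donut-converted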
| 638
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 3
_lowerCamelCase = (32, 32)
_lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A_ )
return image
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
        _lowerCamelCase = UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=A_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
return CLIPTextModel(A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.dummy_cond_unet_upscale
_lowerCamelCase = DDPMScheduler()
_lowerCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
_lowerCamelCase = self.dummy_vae
_lowerCamelCase = self.dummy_text_encoder
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCamelCase = Image.fromarray(np.uint8(A_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCamelCase = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_50 , )
_lowerCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A painting of a squirrel eating a burger'''
_lowerCamelCase = torch.Generator(device=A_ ).manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_lowerCamelCase = output.images
_lowerCamelCase = torch.Generator(device=A_ ).manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=A_ , )[0]
_lowerCamelCase = image[0, -3:, -3:, -1]
_lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
_lowerCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCamelCase = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.dummy_cond_unet_upscale
_lowerCamelCase = DDPMScheduler()
_lowerCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
_lowerCamelCase = self.dummy_vae
_lowerCamelCase = self.dummy_text_encoder
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCamelCase = Image.fromarray(np.uint8(A_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCamelCase = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_50 , )
_lowerCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A painting of a squirrel eating a burger'''
_lowerCamelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_lowerCamelCase = output.images
assert image.shape[0] == 2
_lowerCamelCase = torch.Generator(device=A_ ).manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=A_ , generator=A_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_lowerCamelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = self.dummy_cond_unet_upscale
_lowerCamelCase = DDPMScheduler()
_lowerCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
_lowerCamelCase = self.dummy_vae
_lowerCamelCase = self.dummy_text_encoder
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCamelCase = Image.fromarray(np.uint8(A_ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCamelCase = unet.half()
_lowerCamelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
_lowerCamelCase = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_50 , )
_lowerCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A painting of a squirrel eating a burger'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=A_ , generator=A_ , num_inference_steps=2 , output_type='''np''' , ).images
_lowerCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
_lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
_lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_lowerCamelCase = '''a cat sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=A_ , image=A_ , generator=A_ , output_type='''np''' , )
_lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
_lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
        _lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
            A_ , torch_dtype=torch.float16 , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_lowerCamelCase = '''a cat sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=A_ , image=A_ , generator=A_ , output_type='''np''' , )
_lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
        _lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
            A_ , torch_dtype=torch.float16 , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCamelCase = '''a cat sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=A_ , image=A_ , generator=A_ , num_inference_steps=5 , output_type='''np''' , )
_lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 638
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 638
| 1
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Decorator that triggers the accelerate `_hf_hook.pre_forward` hook, if one is
    attached, before running `method`. It is a no-op when accelerate is unavailable
    or older than 0.17.0."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
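# Usage sketch (hypothetical class; mirrors how diffusers decorates methods such as
# a VAE's `encode`/`decode` so offloaded weights are restored before they run):
#
#   class MyModel:
#       @apply_forward_hook
#       def encode(self, x):
#           return x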
| 638
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
| 1
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between the classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset (samples as columns) onto `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the
        # first `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto `dimensions` linear discriminant directions."""
    # The number of projection dimensions must be smaller than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        projected_data = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, projected_data):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
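    # Small end-to-end demonstration (illustrative data): project a 3-feature
    # dataset, with samples as columns, onto its two principal components.
    demo_features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 2.0, 2.0]])
    print(principal_component_analysis(demo_features, dimensions=2))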
| 638
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """Output of UNet1DModel: `sample` has shape `(batch_size, num_channels, sample_size)`."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that maps a noisy sample and a timestep to a sample-shaped output."""
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
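# Shape sketch (assumed defaults, not executed here): the model maps a
# (batch, in_channels, length) waveform plus a timestep to an output of the
# same shape.
#
#   model = UNet1DModel()
#   noisy = torch.randn(1, model.config.in_channels, 256)
#   out = model(noisy, timestep=torch.tensor([10])).sample  # same shape as `noisy`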
| 638
| 1
|
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune the walk in place: skip `scripts` and hidden/private directories.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('.')
| 638
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 638
| 1
|
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
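    # Non-interactive round trip (classic Vigenère example with key LEMON):
    #   encrypt_message("LEMON", "Attack at dawn")  -> "Lxfopv ef rnhr"
    #   decrypt_message("LEMON", "Lxfopv ef rnhr")  -> "Attack at dawn"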
| 638
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        # Record leaf modules (and Conv2d/BatchNorm2d, which may wrap submodules).
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x):
        """Transfer the weights of `self.src` to `self.dest` by performing a forward
        pass with `x` through both modules and pairing the traced operations."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
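
# Example invocation (added for illustration; the script filename and the paths
# below are placeholders, not taken from the original file):
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted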
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
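

# Added sanity-check sketch (not part of the original file): every beta from the
# cosine schedule lies in (0, max_beta], and alpha_bar decays monotonically.
def _betas_demo() -> None:
    betas = betas_for_alpha_bar(10)
    assert torch.all(betas > 0) and torch.all(betas <= 0.999)
    alpha_bar = torch.cumprod(1.0 - betas, dim=0)
    assert torch.all(alpha_bar[1:] < alpha_bar[:-1])
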
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """
    A modified DDPM scheduler used in the unCLIP (karlo) pipeline; it only supports the
    `squaredcos_cap_v2` beta schedule.
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep. This scheduler does no scaling and returns the sample unchanged.
        """
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference). The timesteps are
        spaced so that the last timestep is always the train timestep 0.
        """
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).
        """
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
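

# Added minimal denoising-loop sketch using UnCLIPScheduler with a stand-in
# "model" that just returns random noise; it only illustrates shapes and the
# step API, not real sampling quality.
def _unclip_loop_demo() -> None:
    scheduler = UnCLIPScheduler()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 4, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # placeholder for a denoising network
        sample = scheduler.step(model_output, int(t), sample).prev_sample
    assert sample.shape == (1, 4, 8, 8)
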
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput


if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
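

# Added standalone sketch of the pool pattern the batch test relies on: the
# processor must exist *before* the pool is created so forked workers inherit
# the language model. Downloads a small test repo from the Hub when run.
def _pool_decode_demo():
    processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = np.random.rand(2, 10, 16)  # dummy CTC logits
    with get_context("fork").Pool() as pool:
        return processor.batch_decode(logits, pool).text
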
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
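

# Added model-free sketch of the rollout bookkeeping used above: each step
# appends a zero action placeholder and the next observed state to the buffers.
# Dimensions below are illustrative, not taken from the test.
def _rollout_buffers_demo(state_dim: int = 11, act_dim: int = 3) -> None:
    states = torch.randn(1, 1, state_dim)
    actions = torch.zeros(1, 0, act_dim)
    for _step in range(2):
        actions = torch.cat([actions, torch.zeros(1, 1, act_dim)], dim=1)
        states = torch.cat([states, torch.randn(1, 1, state_dim)], dim=1)
    assert states.shape == (1, 3, state_dim) and actions.shape == (1, 2, act_dim)
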
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    # a subset value says True if that subset sum can be formed else False
    # initially no subsets can be formed hence False
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
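

# Added worked example: with arr = [3, 34, 4, 12, 5, 2], the subset {4, 5}
# sums to 9, while no subset reaches 30 (the elements besides 34 sum to 26).
def _subset_sum_demo() -> None:
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False
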
if __name__ == "__main__":
import doctest
doctest.testmod()
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
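

# Added usage sketch: a task template declares which dataset columns map to the
# canonical "question"/"context"/"answers" names. The dataset and column names
# below are illustrative placeholders.
# dataset = dataset.prepare_for_task(
#     QuestionAnsweringExtractive(question_column="query", context_column="passage", answers_column="answers")
# )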
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the ranges of shard indices per job, distributing the remainder over the first groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into `max_num_jobs` gen_kwargs, slicing only the list-valued entries."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs; lists of the same size get the same shuffling."""
    # First, let's generate the shuffled indices per list size
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
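

# Added worked example: 5 shards over 2 jobs yield ranges [0..2] and [3..4],
# and _split_gen_kwargs slices only the list-valued entries accordingly.
def _sharding_demo() -> None:
    assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]
    gen_kwargs = {"files": ["a", "b", "c", "d", "e"], "tag": "train"}
    splits = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
    assert splits[0]["files"] == ["a", "b", "c"] and splits[1]["tag"] == "train"
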
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
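
# Example invocation (added for illustration; the script filename and all paths
# are placeholders, not taken from the original file):
#   python convert_mluke_checkpoint.py --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./converted --model_size base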
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_lowerCamelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
# prepare image, target and masks_path
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase = YolosImageProcessor(format='''coco_panoptic''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify masks
_lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A_ )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case__ = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import json
from tqdm import tqdm
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__UpperCAmelCase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=__UpperCAmelCase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=__UpperCAmelCase , help='''where to store parsed gold_data_path file''' , )
_lowerCamelCase = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
_lowerCamelCase = json.load(__UpperCAmelCase )
for dpr_record in tqdm(__UpperCAmelCase ):
_lowerCamelCase = dpr_record['''question''']
_lowerCamelCase = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(__UpperCAmelCase ) + '''\n''' )
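        # Illustrative record (hypothetical values): for a DPR entry with question
        # "who wrote hamlet" and positive context titles ["Hamlet", "Shakespeare"],
        # the evaluation file receives the line "who wrote hamlet" and the gold
        # file receives the tab-joined line "Hamlet\tShakespeare".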
if __name__ == "__main__":
main()
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
snake_case__ = logging.get_logger(__name__)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
def run_func(__UpperCAmelCase ):
@wraps(__UpperCAmelCase )
def run_in_eager_mode(*__UpperCAmelCase , **__UpperCAmelCase ):
return func(*__UpperCAmelCase , **__UpperCAmelCase )
@wraps(__UpperCAmelCase )
@tf.function(experimental_compile=__UpperCAmelCase )
def run_in_graph_mode(*__UpperCAmelCase , **__UpperCAmelCase ):
return func(*__UpperCAmelCase , **__UpperCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
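# Sketch of how the factory above is applied further below (keyword names are a
# readability assumption; the real calls pass the arguments positionally):
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)
#
# With eager mode disabled, the wrapped function goes through `tf.function` with
# `experimental_compile=True`, so the first call triggers XLA compilation and
# later calls reuse the compiled graph; eager mode combined with XLA is rejected.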
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> ["tf.Tensor"]:
'''simple docstring'''
_lowerCamelCase = random.Random()
_lowerCamelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(__UpperCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
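# Minimal usage sketch (values are hypothetical): random_input_ids(2, 8, 100)
# returns an integer tensor of shape (2, 8) whose entries are drawn uniformly
# from [0, 99] with Python's `random` module, so runs are reproducible only if
# `random.seed(...)` has been set beforehand.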
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
A_ = 42
A_ = "TensorFlow"
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return tf.__version__
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> float:
"""simple docstring"""
# initialize GPU on separate process
_lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_lowerCamelCase = self._prepare_inference_func(A_ , A_ , A_ )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> float:
"""simple docstring"""
_lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_lowerCamelCase = self._prepare_train_func(A_ , A_ , A_ )
return self._measure_speed(_train )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> [Memory, Optional[MemorySummary]]:
"""simple docstring"""
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A_ )
_lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_lowerCamelCase = self._prepare_inference_func(A_ , A_ , A_ )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> [Memory, Optional[MemorySummary]]:
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A_ )
_lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_lowerCamelCase = self._prepare_train_func(A_ , A_ , A_ )
return self._measure_memory(_train )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Callable[[], None]:
"""simple docstring"""
_lowerCamelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
_lowerCamelCase = (
hasattr(A_ , '''architectures''' )
and isinstance(config.architectures , A_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCamelCase = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCamelCase = __import__('''transformers''' , fromlist=[model_class] )
_lowerCamelCase = getattr(A_ , A_ )
_lowerCamelCase = model_cls(A_ )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
_lowerCamelCase = TF_MODEL_MAPPING[config.__class__](A_ )
# encoder-decoder has vocab size saved differently
_lowerCamelCase = config.vocab_size if hasattr(A_ , '''vocab_size''' ) else config.encoder.vocab_size
_lowerCamelCase = random_input_ids(A_ , A_ , A_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(A_ , decoder_input_ids=A_ , training=A_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(A_ , training=A_ )
_lowerCamelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Callable[[], None]:
"""simple docstring"""
_lowerCamelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
_lowerCamelCase = (
hasattr(A_ , '''architectures''' )
and isinstance(config.architectures , A_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCamelCase = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCamelCase = __import__('''transformers''' , fromlist=[model_class] )
_lowerCamelCase = getattr(A_ , A_ )
_lowerCamelCase = model_cls(A_ )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
_lowerCamelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](A_ )
# encoder-decoder has vocab size saved differently
_lowerCamelCase = config.vocab_size if hasattr(A_ , '''vocab_size''' ) else config.encoder.vocab_size
_lowerCamelCase = random_input_ids(A_ , A_ , A_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCamelCase = model(A_ , decoder_input_ids=A_ , labels=A_ , training=A_ )[0]
_lowerCamelCase = tf.gradients(A_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCamelCase = model(A_ , labels=A_ , training=A_ )[0]
_lowerCamelCase = tf.gradients(A_ , model.trainable_variables )
return gradients
_lowerCamelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self , A_ ) -> float:
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(A_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_lowerCamelCase = timeit.repeat(
A_ , repeat=self.args.repeat , number=10 , )
return min(A_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
def UpperCamelCase_ ( self , A_ ) -> [Memory, MemorySummary]:
"""simple docstring"""
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
_lowerCamelCase = start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
_lowerCamelCase = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
_lowerCamelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCamelCase = nvml.nvmlDeviceGetMemoryInfo(A_ )
_lowerCamelCase = meminfo.used
_lowerCamelCase = Memory(A_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
_lowerCamelCase = None
else:
_lowerCamelCase = measure_peak_memory_cpu(A_ )
_lowerCamelCase = Memory(A_ ) if isinstance(A_ , A_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCamelCase = stop_memory_tracing(A_ )
if memory is None:
_lowerCamelCase = summary.total
else:
_lowerCamelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
return "N/A", None
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ = "cpu" , A_ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(A_ )
_lowerCamelCase = [0.48145466, 0.4578275, 0.40821073]
_lowerCamelCase = [0.26862954, 0.26130258, 0.27577711]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_24 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_24 )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.resize(A_ )
_lowerCamelCase = self.center_crop(A_ )
_lowerCamelCase = self.normalize(A_ )
return images
def __call__( self , A_=None , A_=None , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(text=A_ , **A_ )
_lowerCamelCase = self.preprocess_img(A_ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
            print('''Only one image found in save path (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.clip_preprocessor(text=A_ , images=A_ , return_tensors='''pt''' , padding=A_ )
_lowerCamelCase = self.clip(**A_ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , A_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , A_ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(A_ ) + torch.log(A_ )
return loss
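    # Note on the objective above: the loss is -log(pos) + log(neg) = log(neg / pos),
    # so minimizing it pushes the decoded image toward the positive prompts and away
    # from the negative ones. Without negative prompts, `neg` is fixed to 1 and the
    # log(1) = 0 term vanishes.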
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
            wandb.log({'''Original Image''': wandb.Image(A_ )} )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
if isinstance(A_ , A_ ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A_ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(A_ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(A_ )
weights.append(A_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
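    # Hypothetical input to illustrate the parsing above:
    #
    #   "smiling face:2.0|blue eyes"
    #   -> {"prompts": ["smiling face", "blue eyes"], "weights": tensor([2.0, 1.0])}
    #
    # Prompts may also arrive as (text, weight) tuples/lists; bare strings default
    # to a weight of 1.0.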
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
snake_case__ = get_logger(__name__)
class UpperCamelCase :
'''simple docstring'''
A_ = 'dummy_data'
A_ = 'datasets'
A_ = False
def __init__( self , A_ , A_ , A_ , A_ = None , A_ = False , A_ = True , A_ = None , ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = 0
_lowerCamelCase = dataset_name
_lowerCamelCase = cache_dir
_lowerCamelCase = use_local_dummy_data
_lowerCamelCase = config
# download_callbacks take a single url as input
_lowerCamelCase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCamelCase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCamelCase = str(A_ )
# to be downloaded
_lowerCamelCase = None
_lowerCamelCase = None
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
_lowerCamelCase = self.download_dummy_data()
return self._dummy_file
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCamelCase = cached_path(
A_ , cache_dir=self.cache_dir , extract_compressed_file=A_ , force_extract=A_ )
return os.path.join(A_ , self.dummy_file_name )
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
if self._bucket_url is None:
_lowerCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
        # return the full path if it's a directory
if os.path.isdir(self.dummy_file ):
return self.dummy_file
        # else cut off the file name from the path -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def UpperCamelCase_ ( self , A_ , *A_ ) -> List[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A_ , A_ ):
return self.create_dummy_data_dict(A_ , A_ )
elif isinstance(A_ , (list, tuple) ):
return self.create_dummy_data_list(A_ , A_ )
else:
return self.create_dummy_data_single(A_ , A_ )
def UpperCamelCase_ ( self , A_ , *A_ ) -> List[Any]:
"""simple docstring"""
return self.download_and_extract(A_ )
def UpperCamelCase_ ( self , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(A_ )
def UpperCamelCase_ ( self , A_ , *A_ , **A_ ) -> Union[str, Any]:
"""simple docstring"""
return path
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {}
def UpperCamelCase_ ( self , A_ , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A_ , A_ ):
for single_url in single_urls:
download_callback(A_ )
else:
_lowerCamelCase = single_urls
download_callback(A_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A_ , A_ ):
_lowerCamelCase = [os.path.join(A_ , urllib.parse.quote_plus(Path(A_ ).name ) ) for x in single_urls]
else:
_lowerCamelCase = single_urls
_lowerCamelCase = os.path.join(A_ , urllib.parse.quote_plus(Path(A_ ).name ) )
_lowerCamelCase = value
# make sure that values are unique
if all(isinstance(A_ , A_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
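    # Illustrative mapping (hypothetical URL): {"train": "https://host/train.json?raw=true"}
    # becomes {"train": os.path.join(path_to_dummy, "train.json%3Fraw%3Dtrue")} --
    # quote_plus keeps the query string inside a single valid path component, and
    # duplicate values get the dict key appended so the file names stay unique.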
def UpperCamelCase_ ( self , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCamelCase = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , A_ ) ) for url in data_url )
_lowerCamelCase = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCamelCase = [data_url[0]] * len(A_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(A_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase = os.path.join(A_ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(A_ )
return dummy_data_list
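    # The shard trick above, e.g. for URLs ending in "data.txt-000001-of-00300"
    # (name is illustrative): every shard is mapped to the first one, so the
    # dummy archive only needs to ship a single file.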
def UpperCamelCase_ ( self , A_ , A_ ) -> Any:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(A_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase = os.path.join(A_ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(A_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
            # while now we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def UpperCamelCase_ ( self , A_ ) -> List[Any]:
"""simple docstring"""
def _iter_archive_members(A_ ):
# this preserves the order of the members inside the ZIP archive
_lowerCamelCase = Path(self.dummy_file ).parent
_lowerCamelCase = path.relative_to(A_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCamelCase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(A_ )
_lowerCamelCase = Path(A_ )
_lowerCamelCase = _iter_archive_members(A_ ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(A_ ).as_posix(), file_path.open('''rb''' )
def UpperCamelCase_ ( self , A_ ) -> Tuple:
"""simple docstring"""
if not isinstance(A_ , A_ ):
_lowerCamelCase = [paths]
for path in paths:
if os.path.isfile(A_ ):
if os.path.basename(A_ ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(A_ ):
if os.path.basename(A_ ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(A_ ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(A_ , A_ )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def __magic_name__( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if "resnet-50" in model_name:
_lowerCamelCase = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
_lowerCamelCase = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
        raise ValueError('''Model name should include either resnet-50 or resnet-101''' )
_lowerCamelCase = DetrConfig(use_timm_backbone=__UpperCAmelCase , backbone_config=__UpperCAmelCase )
# set label attributes
_lowerCamelCase = '''panoptic''' in model_name
if is_panoptic:
_lowerCamelCase = 250
else:
_lowerCamelCase = 91
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''coco-detection-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
F'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
F'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase = state_dict.pop(__UpperCAmelCase )
_lowerCamelCase = val
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = ''''''
if is_panoptic:
_lowerCamelCase = '''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
_lowerCamelCase = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[:256, :]
_lowerCamelCase = in_proj_bias[:256]
_lowerCamelCase = in_proj_weight[256:512, :]
_lowerCamelCase = in_proj_bias[256:512]
_lowerCamelCase = in_proj_weight[-256:, :]
_lowerCamelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowerCamelCase = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
_lowerCamelCase = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[:256, :]
_lowerCamelCase = in_proj_bias[:256]
_lowerCamelCase = in_proj_weight[256:512, :]
_lowerCamelCase = in_proj_bias[256:512]
_lowerCamelCase = in_proj_weight[-256:, :]
_lowerCamelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowerCamelCase = state_dict.pop(
F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
_lowerCamelCase = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowerCamelCase = in_proj_weight_cross_attn[:256, :]
_lowerCamelCase = in_proj_bias_cross_attn[:256]
_lowerCamelCase = in_proj_weight_cross_attn[256:512, :]
_lowerCamelCase = in_proj_bias_cross_attn[256:512]
_lowerCamelCase = in_proj_weight_cross_attn[-256:, :]
_lowerCamelCase = in_proj_bias_cross_attn[-256:]
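# Note on the slicing above: DETR uses a hidden size of 256, and PyTorch's
# nn.MultiheadAttention stores the query/key/value projections as one fused
# in_proj_weight of shape (3 * 256, 256). Rows [0:256], [256:512] and [512:768]
# (written [-256:] above) therefore correspond to q, k and v respectively.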
def __magic_name__( ) -> int:
'''simple docstring'''
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> Tuple:
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase = get_detr_config(__UpperCAmelCase )
# load original model from torch hub
_lowerCamelCase = {
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(F'Converting model {model_name}...' )
_lowerCamelCase = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=__UpperCAmelCase ).eval()
_lowerCamelCase = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(__UpperCAmelCase ):
if is_panoptic:
_lowerCamelCase = '''detr.''' + src
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__UpperCAmelCase , is_panoptic=__UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCamelCase = '''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
_lowerCamelCase = state_dict.pop(__UpperCAmelCase )
_lowerCamelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowerCamelCase = state_dict.pop(__UpperCAmelCase )
_lowerCamelCase = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
_lowerCamelCase = state_dict.pop(__UpperCAmelCase )
_lowerCamelCase = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
_lowerCamelCase = state_dict.pop(__UpperCAmelCase )
_lowerCamelCase = val
# finally, create HuggingFace model and load state dict
_lowerCamelCase = DetrForSegmentation(__UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
# verify our conversion on an image
_lowerCamelCase = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
_lowerCamelCase = DetrImageProcessor(format=__UpperCAmelCase )
_lowerCamelCase = processor(images=prepare_img() , return_tensors='''pt''' )
_lowerCamelCase = encoding['''pixel_values''']
_lowerCamelCase = detr(__UpperCAmelCase )
_lowerCamelCase = model(__UpperCAmelCase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(F'nielsr/{model_name}' )
processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
snake_case__ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.mean(1 )
# Centralize the data of class i
_lowerCamelCase = data - column_reshape(__UpperCAmelCase )
if i > 0:
            # covariance_sum has already been initialized (i > 0)
covariance_sum += np.dot(__UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = features.mean(1 )
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.shape[1]
_lowerCamelCase = data.mean(1 )
if i > 0:
            # covariance_sum has already been initialized (i > 0)
covariance_sum += device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
if features.any():
_lowerCamelCase = features.mean(1 )
# Center the dataset
_lowerCamelCase = features - np.reshape(__UpperCAmelCase , (data_mean.size, 1) )
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T ) / features.shape[1]
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(__UpperCAmelCase )
        # Take all the columns in reverse order (-1), then keep only the first `dimensions` of them
_lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCamelCase = np.dot(filtered_eigenvectors.T , __UpperCAmelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
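# Sanity sketch for the PCA above (toy shapes, not executed here): for features
# of shape (n_features, n_samples), the covariance X_c @ X_c.T / n_samples is
# (n_features, n_features); np.linalg.eigh returns eigenvalues in ascending
# order, hence the [:, ::-1] reversal before keeping the leading `dimensions`
# eigenvectors and projecting with filtered_eigenvectors.T @ features.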
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
_lowerCamelCase , _lowerCamelCase = eigh(
covariance_between_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , covariance_within_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , )
_lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = np.linalg.svd(__UpperCAmelCase )
_lowerCamelCase = svd_matrix[:, 0:dimensions]
_lowerCamelCase = np.dot(filtered_svd_matrix.T , __UpperCAmelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
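# scipy.linalg.eigh(A, B) above solves the generalized eigenproblem
# S_between @ v = lambda * S_within @ v (Fisher's criterion). At most
# `classes - 1` of the resulting directions are informative, which is why the
# function asserts classes > dimensions before projecting.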
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCamelCase = np.array([0, 0, 0, 1, 1] )
_lowerCamelCase = 2
_lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = linear_discriminant_analysis(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if isinstance(__UpperCAmelCase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCamelCase = 2
_lowerCamelCase = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = principal_component_analysis(__UpperCAmelCase , __UpperCAmelCase )
if not np.allclose(__UpperCAmelCase , __UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
@flax_register_to_config
class UpperCamelCase ( nn.Module , __lowercase , __lowercase ):
'''simple docstring'''
A_ = 32
A_ = 4
A_ = 4
A_ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
A_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
A_ = False
A_ = (320, 640, 1_280, 1_280)
A_ = 2
A_ = 8
A_ = None
A_ = 1_280
A_ = 0.0
A_ = False
A_ = jnp.floataa
A_ = True
A_ = 0
A_ = False
def UpperCamelCase_ ( self , A_ ) -> FrozenDict:
"""simple docstring"""
# init input tensors
_lowerCamelCase = (1, self.in_channels, self.sample_size, self.sample_size)
_lowerCamelCase = jnp.zeros(A_ , dtype=jnp.floataa )
_lowerCamelCase = jnp.ones((1,) , dtype=jnp.intaa )
_lowerCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_lowerCamelCase , _lowerCamelCase = jax.random.split(A_ )
_lowerCamelCase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(A_ , A_ , A_ , A_ )["params"]
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.block_out_channels
_lowerCamelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_lowerCamelCase = self.num_attention_heads or self.attention_head_dim
# input
_lowerCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_lowerCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_lowerCamelCase = FlaxTimestepEmbedding(A_ , dtype=self.dtype )
_lowerCamelCase = self.only_cross_attention
if isinstance(A_ , A_ ):
_lowerCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(A_ , A_ ):
_lowerCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
_lowerCamelCase = []
_lowerCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
_lowerCamelCase = i == len(A_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_lowerCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_lowerCamelCase = FlaxDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(A_ )
_lowerCamelCase = down_blocks
# mid
_lowerCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_lowerCamelCase = []
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_lowerCamelCase = output_channel
_lowerCamelCase = reversed_block_out_channels[i]
_lowerCamelCase = reversed_block_out_channels[min(i + 1 , len(A_ ) - 1 )]
_lowerCamelCase = i == len(A_ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_lowerCamelCase = FlaxCrossAttnUpBlockaD(
in_channels=A_ , out_channels=A_ , prev_output_channel=A_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_lowerCamelCase = FlaxUpBlockaD(
in_channels=A_ , out_channels=A_ , prev_output_channel=A_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(A_ )
_lowerCamelCase = output_channel
_lowerCamelCase = up_blocks
# out
_lowerCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
_lowerCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , A_ , A_ , A_ , A_=None , A_=None , A_ = True , A_ = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
"""simple docstring"""
# 1. time
if not isinstance(A_ , jnp.ndarray ):
_lowerCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(A_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps.astype(dtype=jnp.floataa )
_lowerCamelCase = jnp.expand_dims(A_ , 0 )
_lowerCamelCase = self.time_proj(A_ )
_lowerCamelCase = self.time_embedding(A_ )
# 2. pre-process
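        # Flax convolutions expect NHWC, so move channels to the last axis
        # (transposed back to NCHW in step 6)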
_lowerCamelCase = jnp.transpose(A_ , (0, 2, 3, 1) )
_lowerCamelCase = self.conv_in(A_ )
# 3. down
_lowerCamelCase = (sample,)
for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
_lowerCamelCase , _lowerCamelCase = down_block(A_ , A_ , A_ , deterministic=not train )
else:
_lowerCamelCase , _lowerCamelCase = down_block(A_ , A_ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_lowerCamelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
A_ , A_ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_lowerCamelCase = new_down_block_res_samples
# 4. mid
_lowerCamelCase = self.mid_block(A_ , A_ , A_ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_lowerCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
_lowerCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlockaD ):
_lowerCamelCase = up_block(
A_ , temb=A_ , encoder_hidden_states=A_ , res_hidden_states_tuple=A_ , deterministic=not train , )
else:
_lowerCamelCase = up_block(A_ , temb=A_ , res_hidden_states_tuple=A_ , deterministic=not train )
# 6. post-process
_lowerCamelCase = self.conv_norm_out(A_ )
_lowerCamelCase = nn.silu(A_ )
_lowerCamelCase = self.conv_out(A_ )
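        # restore NCHW layout so the output matches the PyTorch UNet convention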
_lowerCamelCase = jnp.transpose(A_ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=A_ )
| 638
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ['vqvae']
def __init__( self , A_ , A_ , A_ , A_ , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ , mel=A_ , vqvae=A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 10_00
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = 0 , A_ = None , A_ = None , A_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_lowerCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=A_ , device=self.device , )
_lowerCamelCase = noise
_lowerCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(A_ , A_ )
_lowerCamelCase = self.mel.audio_slice_to_image(A_ )
_lowerCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_lowerCamelCase = (input_image / 2_55) * 2 - 1
_lowerCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCamelCase = self.vqvae.encode(torch.unsqueeze(A_ , 0 ) ).latent_dist.sample(
generator=A_ )[0]
_lowerCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , self.scheduler.timesteps[start_step - 1] )
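            # convert mask start/end times (seconds) into spectrogram pixel columns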
_lowerCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCamelCase = int(mask_start_secs * pixels_per_second )
_lowerCamelCase = int(mask_end_secs * pixels_per_second )
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNetaDConditionModel ):
_lowerCamelCase = self.unet(A_ , A_ , A_ )['''sample''']
else:
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
            if isinstance(self.scheduler , DDIMScheduler ):
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , eta=A_ , generator=A_ , )['''prev_sample''']
else:
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , generator=A_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_lowerCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCamelCase = 1 / self.vqvae.config.scaling_factor * images
_lowerCamelCase = self.vqvae.decode(A_ )['''sample''']
_lowerCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_lowerCamelCase = (images * 2_55).round().astype('''uint8''' )
_lowerCamelCase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
_lowerCamelCase = [self.mel.image_to_audio(A_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(A_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(A_ ) )
@torch.no_grad()
def UpperCamelCase_ ( self , A_ , A_ = 50 ) -> np.ndarray:
"""simple docstring"""
        assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCamelCase = (sample / 2_55) * 2 - 1
_lowerCamelCase = torch.Tensor(A_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_lowerCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCamelCase = self.scheduler.alphas_cumprod[t]
_lowerCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCamelCase = 1 - alpha_prod_t
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
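            # one reverse DDIM step: strip the noise direction predicted for the
            # previous alpha, rescale the sample, then re-noise at the current alpha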
_lowerCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCamelCase_ ( A_ , A_ , A_ ) -> torch.Tensor:
"""simple docstring"""
_lowerCamelCase = acos(torch.dot(torch.flatten(A_ ) , torch.flatten(A_ ) ) / torch.norm(A_ ) / torch.norm(A_ ) )
return sin((1 - alpha) * theta ) * xa / sin(A_ ) + sin(alpha * theta ) * xa / sin(A_ )
| 638
| 1
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = 5
# Realm tok
_lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_lowerCamelCase = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(A_ , exist_ok=A_ )
_lowerCamelCase = os.path.join(A_ , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_lowerCamelCase = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(A_ , exist_ok=A_ )
def UpperCamelCase_ ( self ) -> RealmTokenizer:
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = np.array(
[
B'''This is the first record''',
B'''This is the second record''',
B'''This is the third record''',
B'''This is the fourth record''',
B'''This is the fifth record''',
B'''This is a longer longer longer record''',
] , dtype=A_ , )
return block_records
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_config()
_lowerCamelCase = self.get_dummy_retriever()
_lowerCamelCase = retriever.tokenizer
_lowerCamelCase = np.array([0, 3] , dtype='''long''' )
_lowerCamelCase = tokenizer(['''Test question'''] ).input_ids
_lowerCamelCase = tokenizer(
['''the fourth'''] , add_special_tokens=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , ).input_ids
_lowerCamelCase = config.reader_seq_len
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = retriever(
A_ , A_ , answer_ids=A_ , max_length=A_ , return_tensors='''np''' )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_config()
_lowerCamelCase = self.get_dummy_retriever()
_lowerCamelCase = retriever.tokenizer
_lowerCamelCase = np.array([0, 3, 5] , dtype='''long''' )
_lowerCamelCase = tokenizer(['''Test question'''] ).input_ids
_lowerCamelCase = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , ).input_ids
_lowerCamelCase = config.reader_seq_len
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = retriever(
A_ , A_ , answer_ids=A_ , max_length=A_ , return_tensors='''np''' )
self.assertEqual([False, True, True] , A_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , A_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , A_ )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
_lowerCamelCase = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
_lowerCamelCase = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
_lowerCamelCase = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
| 638
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowercase ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A_ )
assert mmeta["long_pair"] == "heb-eng"
| 638
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
snake_case__ = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase = ''''''
else:
_lowerCamelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = dct.pop(__UpperCAmelCase )
_lowerCamelCase = val
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True ) -> str:
'''simple docstring'''
_lowerCamelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCamelCase = 8
# set labels if required
if not base_model:
_lowerCamelCase = 1000
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
        _lowerCamelCase = {int(k): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCamelCase = 384
_lowerCamelCase = 1536
_lowerCamelCase = 12
_lowerCamelCase = 6
# load original model from torch hub
_lowerCamelCase = torch.hub.load('''facebookresearch/dino:main''' , __UpperCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__UpperCAmelCase )
_lowerCamelCase = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# load HuggingFace model
if base_model:
_lowerCamelCase = ViTModel(__UpperCAmelCase , add_pooling_layer=__UpperCAmelCase ).eval()
else:
_lowerCamelCase = ViTForImageClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCamelCase = ViTImageProcessor()
_lowerCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowerCamelCase = encoding['''pixel_values''']
_lowerCamelCase = model(__UpperCAmelCase )
if base_model:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCAmelCase , outputs.logits , atol=1E-3 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
snake_case__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 638
| 1
|
def __magic_name__( __UpperCAmelCase = 1000 ) -> int:
'''simple docstring'''
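    # Project Euler problem 1: sum of all multiples of 3 or 5 below the limit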
return sum(e for e in range(3 , __UpperCAmelCase ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 638
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load_from_json(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 42
_lowerCamelCase = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
_lowerCamelCase = UniSpeechForCTC(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCamelCase = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 638
| 1
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
snake_case__ = True
except (ImportError, AttributeError):
snake_case__ = object
def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
pass
snake_case__ = False
snake_case__ = logging.get_logger('transformers-cli/serving')
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
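    # argparse factory: build a pipeline from the CLI arguments, then wrap it
    # in a ServeCommand that exposes it over REST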
_lowerCamelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(__UpperCAmelCase , args.host , args.port , args.workers )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
A_ = 42
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=A_ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=A_ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=A_ , default=88_88 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=A_ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=A_ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=A_ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=A_ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=A_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=A_ )
def __init__( self , A_ , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = pipeline
_lowerCamelCase = host
_lowerCamelCase = port
_lowerCamelCase = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(F'Serving model over {host}:{port}' )
_lowerCamelCase = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=A_ , response_class=A_ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=A_ , response_class=A_ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=A_ , response_class=A_ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=A_ , response_class=A_ , methods=['''POST'''] , ),
] , timeout=6_00 , )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
run(self._app , host=self.host , port=self.port , workers=self.workers )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def UpperCamelCase_ ( self , A_ = Body(A_ , embed=A_ ) , A_ = Body(A_ , embed=A_ ) ) -> str:
"""simple docstring"""
try:
_lowerCamelCase = self._pipeline.tokenizer.tokenize(A_ )
if return_ids:
_lowerCamelCase = self._pipeline.tokenizer.convert_tokens_to_ids(A_ )
return ServeTokenizeResult(tokens=A_ , tokens_ids=A_ )
else:
return ServeTokenizeResult(tokens=A_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(A_ )} )
def UpperCamelCase_ ( self , A_ = Body(A_ , embed=A_ ) , A_ = Body(A_ , embed=A_ ) , A_ = Body(A_ , embed=A_ ) , ) -> int:
"""simple docstring"""
try:
_lowerCamelCase = self._pipeline.tokenizer.decode(A_ , A_ , A_ )
return ServeDeTokenizeResult(model='''''' , text=A_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(A_ )} )
async def UpperCamelCase_ ( self , A_=Body(A_ , embed=A_ ) ) -> Any:
"""simple docstring"""
# Check we don't have empty string
if len(A_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
_lowerCamelCase = self._pipeline(A_ )
return ServeForwardResult(output=A_ )
except Exception as e:
raise HTTPException(5_00 , {'''error''': str(A_ )} )
| 638
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_ )
| 638
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
|
import argparse
import json
import subprocess
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
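    # query the GitHub Actions API for the repository's self-hosted runners and
    # collect those from `target_runners` that report an "offline" status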
_lowerCamelCase = []
_lowerCamelCase = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
_lowerCamelCase = subprocess.run(__UpperCAmelCase , shell=__UpperCAmelCase , stdout=subprocess.PIPE )
_lowerCamelCase = output.stdout.decode('''utf-8''' )
_lowerCamelCase = json.loads(__UpperCAmelCase )
_lowerCamelCase = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__UpperCAmelCase )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
_lowerCamelCase = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
return values.split(''',''' )
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
snake_case__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 638
| 1
|
from typing import List
from .keymap import KEYMAP, get_character
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
def decorator(__UpperCAmelCase ):
_lowerCamelCase = getattr(__UpperCAmelCase , '''handle_key''' , [] )
handle += [key]
setattr(__UpperCAmelCase , '''handle_key''' , __UpperCAmelCase )
return func
return decorator
def __magic_name__( *__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
def decorator(__UpperCAmelCase ):
_lowerCamelCase = getattr(__UpperCAmelCase , '''handle_key''' , [] )
handle += keys
setattr(__UpperCAmelCase , '''handle_key''' , __UpperCAmelCase )
return func
return decorator
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __new__( cls , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = super().__new__(cls , A_ , A_ , A_ )
if not hasattr(A_ , '''key_handler''' ):
setattr(A_ , '''key_handler''' , {} )
setattr(A_ , '''handle_input''' , KeyHandler.handle_input )
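        # scan the class body for methods tagged with a `handle_key` list and
        # register each method in the dispatch table under every key it handles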
for value in attrs.values():
_lowerCamelCase = getattr(A_ , '''handle_key''' , [] )
for key in handled_keys:
_lowerCamelCase = value
return new_cls
@staticmethod
def UpperCamelCase_ ( cls ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = get_character()
if char != KEYMAP["undefined"]:
_lowerCamelCase = ord(A_ )
_lowerCamelCase = cls.key_handler.get(A_ )
if handler:
_lowerCamelCase = char
return handler(cls )
else:
return None
def __magic_name__( cls ) -> List[Any]:
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 638
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 638
| 1
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ = "cpu" , A_ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(A_ )
_lowerCamelCase = [0.48145466, 0.4578275, 0.40821073]
_lowerCamelCase = [0.26862954, 0.26130258, 0.27577711]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_24 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_24 )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.resize(A_ )
_lowerCamelCase = self.center_crop(A_ )
_lowerCamelCase = self.normalize(A_ )
return images
def __call__( self , A_=None , A_=None , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(text=A_ , **A_ )
_lowerCamelCase = self.preprocess_img(A_ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.clip_preprocessor(text=A_ , images=A_ , return_tensors='''pt''' , padding=A_ )
_lowerCamelCase = self.clip(**A_ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , A_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , A_ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(A_ ) + torch.log(A_ )
return loss
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
wandb.log('''Original Image''' , wandb.Image(A_ ) )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
        if isinstance(prompts , str ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A_ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(A_ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(A_ )
weights.append(A_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
| 638
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
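    # double-and-add ("Russian peasant") multiplication: O(log b) additions,
    # e.g. a=13, b=11 -> 143 without using the * operator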
_lowerCamelCase = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
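    # same double-and-add scheme, computing (a * b) % c by reducing the running
    # sum modulo c at every step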
_lowerCamelCase = 0
while b > 0:
if b & 1:
_lowerCamelCase = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 638
| 1
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_ )
| 638
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = model.config
_lowerCamelCase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
_lowerCamelCase = MBartConfig(
is_decoder=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , add_cross_attention=__UpperCAmelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__UpperCAmelCase , add_final_layer_norm=__UpperCAmelCase , )
return encoder_config, decoder_config
def __magic_name__( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if "encoder.model" in name:
_lowerCamelCase = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
_lowerCamelCase = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_lowerCamelCase = '''encoder.''' + name
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_lowerCamelCase = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_lowerCamelCase = '''encoder.layernorm.bias'''
return name
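# Illustration (hypothetical checkpoint key) of the rewriting above:
#   "encoder.model.layers.0.blocks.0.attn.proj.weight"
#     -> "encoder.encoder.layers.0.blocks.0.attention.output.dense.weight"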
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(__UpperCAmelCase )
if "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[3] )
_lowerCamelCase = int(key_split[5] )
_lowerCamelCase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[dim : dim * 2, :]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[:dim]
_lowerCamelCase = val[dim : dim * 2]
_lowerCamelCase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_lowerCamelCase = val
return orig_state_dict
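# Shape note for the qkv split above: the fused projection has shape
# (3 * dim, dim) for weights and (3 * dim,) for biases, so the slices
# [:dim], [dim : dim * 2] and [-dim:] yield the query, key and value parts.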
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase = DonutModel.from_pretrained(__UpperCAmelCase ).eval()
# load HuggingFace model
_lowerCamelCase , _lowerCamelCase = get_configs(__UpperCAmelCase )
_lowerCamelCase = DonutSwinModel(__UpperCAmelCase )
_lowerCamelCase = MBartForCausalLM(__UpperCAmelCase )
_lowerCamelCase = VisionEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
model.eval()
_lowerCamelCase = original_model.state_dict()
_lowerCamelCase = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# verify results on scanned document
_lowerCamelCase = load_dataset('''hf-internal-testing/example-documents''' )
_lowerCamelCase = dataset['''test'''][0]['''image'''].convert('''RGB''' )
_lowerCamelCase = XLMRobertaTokenizerFast.from_pretrained(__UpperCAmelCase , from_slow=__UpperCAmelCase )
_lowerCamelCase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_lowerCamelCase = DonutProcessor(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_lowerCamelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_lowerCamelCase = '''When is the coffee break?'''
_lowerCamelCase = task_prompt.replace('''{user_input}''' , __UpperCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_lowerCamelCase = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_lowerCamelCase = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
_lowerCamelCase = '''<s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_lowerCamelCase = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_lowerCamelCase = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
_lowerCamelCase = original_model.decoder.tokenizer(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
_lowerCamelCase = original_model.encoder.model.patch_embed(__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase = model.encoder.embeddings(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
# verify encoder hidden states
_lowerCamelCase = original_model.encoder(__UpperCAmelCase )
_lowerCamelCase = model.encoder(__UpperCAmelCase ).last_hidden_state
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
# verify decoder hidden states
_lowerCamelCase = original_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).logits
_lowerCamelCase = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
snake_case__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
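# Example invocation (sketch; the script filename is assumed):
#   python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
#       --pytorch_dump_folder_path ./donut-base --push_to_hub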
| 638
| 1
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def __magic_name__( __UpperCAmelCase=32 , __UpperCAmelCase=10 , __UpperCAmelCase=100 , __UpperCAmelCase=1026 , __UpperCAmelCase=True , __UpperCAmelCase="data/tokenized_stories_train_wikitext103.jbl" , __UpperCAmelCase="igf_context_pairs.jbl" , ) -> Dict:
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
_lowerCamelCase , _lowerCamelCase = generate_datasets(
__UpperCAmelCase , __UpperCAmelCase , number=__UpperCAmelCase , min_len=1026 , trim=__UpperCAmelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_lowerCamelCase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
_lowerCamelCase = load_gpta('''gpt2''' ).to(__UpperCAmelCase )
print('''computing perplexity on objective set''' )
_lowerCamelCase = compute_perplexity(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).item()
print('''perplexity on objective set:''' , __UpperCAmelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=15 , __UpperCAmelCase=128 , __UpperCAmelCase=100 , __UpperCAmelCase="igf_model.pt" , ) -> Optional[Any]:
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
_lowerCamelCase = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
_lowerCamelCase = SecondaryLearner(__UpperCAmelCase )
# Train secondary learner
_lowerCamelCase = train_secondary_learner(
__UpperCAmelCase , __UpperCAmelCase , max_epochs=__UpperCAmelCase , batch_size=__UpperCAmelCase , eval_freq=100 , igf_model_path=__UpperCAmelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=32 , __UpperCAmelCase=1000 , __UpperCAmelCase=16 , __UpperCAmelCase=1.0 , __UpperCAmelCase=recopy_gpta , __UpperCAmelCase=None , __UpperCAmelCase=10 , __UpperCAmelCase="gpt2_finetuned.pt" , ) -> str:
'''simple docstring'''
_lowerCamelCase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
_lowerCamelCase = RandomSampler(__UpperCAmelCase )
_lowerCamelCase = DataLoader(__UpperCAmelCase , sampler=__UpperCAmelCase )
_lowerCamelCase = max_steps // (len(__UpperCAmelCase )) + 1
_lowerCamelCase = 0
_lowerCamelCase = torch.zeros((1, context_len) , dtype=torch.long , device=__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = recopy_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(__UpperCAmelCase )
secondary_learner.eval()
_lowerCamelCase = []
_lowerCamelCase = 0
_lowerCamelCase = []
_lowerCamelCase = []
# Compute the performance of the transformer model at the beginning
_lowerCamelCase = compute_perplexity(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
test_perps.append(__UpperCAmelCase )
print('''Test perplexity, step''' , __UpperCAmelCase , ''':''' , __UpperCAmelCase )
for epoch in range(int(__UpperCAmelCase ) ):
for step, example in enumerate(__UpperCAmelCase ):
torch.cuda.empty_cache()
_lowerCamelCase = random.randint(0 , example.size(2 ) - context_len - 1 )
_lowerCamelCase = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_lowerCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
_lowerCamelCase = True
if secondary_learner is not None:
_lowerCamelCase = secondary_learner.forward(
torch.tensor(__UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__UpperCAmelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_lowerCamelCase = -1
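# (Despite the wording above, this is a single step change rather than a
# gradual decay: the threshold simply drops to -1 once 10 batches have passed.)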
if predicted_q < threshold:
_lowerCamelCase = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_lowerCamelCase = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_lowerCamelCase = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_lowerCamelCase = compute_perplexity(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
test_perps.append(__UpperCAmelCase )
print('''Test perplexity, step''' , __UpperCAmelCase , ''':''' , __UpperCAmelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __UpperCAmelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def __magic_name__( ) -> Tuple:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=__UpperCAmelCase , default=__UpperCAmelCase , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=__UpperCAmelCase , default=__UpperCAmelCase , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=__UpperCAmelCase , default=__UpperCAmelCase , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=__UpperCAmelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=__UpperCAmelCase , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=__UpperCAmelCase , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1000 , type=__UpperCAmelCase , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=__UpperCAmelCase , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=__UpperCAmelCase , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=__UpperCAmelCase , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=__UpperCAmelCase , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1026 , type=__UpperCAmelCase , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=__UpperCAmelCase , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=__UpperCAmelCase , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=__UpperCAmelCase , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__UpperCAmelCase , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
_lowerCamelCase = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
_lowerCamelCase = training_secondary_learner(
__UpperCAmelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
_lowerCamelCase = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_lowerCamelCase , _lowerCamelCase = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1026 , trim=__UpperCAmelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__UpperCAmelCase , secondary_learner=__UpperCAmelCase , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
| 638
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 638
| 1
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> float:
'''simple docstring'''
_lowerCamelCase = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('''All input parameters must be non-negative''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
_lowerCamelCase = 1 - (matter_density + radiation_density + dark_energy)
_lowerCamelCase = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
_lowerCamelCase = hubble_constant * e_a ** (1 / 2)
return hubble
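# The expression above is the Friedmann equation written in terms of the
# density parameters: H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL),
# where the curvature density is Ok = 1 - (Om + Or + OL).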
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
snake_case__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 638
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
| 1
|
import numpy
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
_lowerCamelCase = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_lowerCamelCase = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_lowerCamelCase = numpy.random.rand(3 , 1 )
# Real output values provided.
_lowerCamelCase = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_lowerCamelCase = numpy.zeros(output_array.shape )
def UpperCamelCase_ ( self ) -> numpy.ndarray:
"""simple docstring"""
_lowerCamelCase = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_lowerCamelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_lowerCamelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCamelCase_ ( self ) -> None:
"""simple docstring"""
_lowerCamelCase = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
_lowerCamelCase = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
_lowerCamelCase = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> None:
"""simple docstring"""
for iteration in range(1 , iterations + 1 ):
_lowerCamelCase = self.feedforward()
self.back_propagation()
if give_loss:
_lowerCamelCase = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'Iteration {iteration} Loss: {loss}' )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = input_arr
_lowerCamelCase = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
_lowerCamelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
_lowerCamelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def __magic_name__( __UpperCAmelCase ) -> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def __magic_name__( __UpperCAmelCase ) -> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
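# Note: sigmoid_derivative above expects the *activation* sigmoid(x) as its
# argument, exploiting the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)).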
def __magic_name__( ) -> int:
'''simple docstring'''
_lowerCamelCase = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
_lowerCamelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
_lowerCamelCase = TwoHiddenLayerNeuralNetwork(
input_array=__UpperCAmelCase , output_array=__UpperCAmelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=__UpperCAmelCase , iterations=10 , give_loss=__UpperCAmelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 638
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , A_ , A_ , A_ = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
_lowerCamelCase = timestep
if not torch.is_tensor(A_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(sample.device )
_lowerCamelCase = self.time_proj(A_ )
if self.config.use_timestep_embedding:
_lowerCamelCase = self.time_mlp(A_ )
else:
_lowerCamelCase = timestep_embed[..., None]
_lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase = ()
for downsample_block in self.down_blocks:
_lowerCamelCase , _lowerCamelCase = downsample_block(hidden_states=A_ , temb=A_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase = self.mid_block(A_ , A_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase = down_block_res_samples[-1:]
_lowerCamelCase = down_block_res_samples[:-1]
_lowerCamelCase = upsample_block(A_ , res_hidden_states_tuple=A_ , temb=A_ )
# 5. post-process
if self.out_block:
_lowerCamelCase = self.out_block(A_ , A_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A_ )
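# Minimal usage sketch (class and argument names assumed, mirroring the
# signature above):
#   unet = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
#   denoised = unet(noisy_sample, timestep=10).sample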
| 638
| 1
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ , A_=14 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=4 , A_=4 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=5_12 , A_=0.02 , ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = rotary_dim
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = initializer_range
_lowerCamelCase = None
_lowerCamelCase = vocab_size - 1
_lowerCamelCase = vocab_size - 1
_lowerCamelCase = vocab_size - 1
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=A_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = 20
_lowerCamelCase = model_class_name(A_ )
_lowerCamelCase = model.init_cache(input_ids.shape[0] , A_ )
_lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
_lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
_lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=A_ , past_key_values=A_ , position_ids=A_ , )
_lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
_lowerCamelCase = model(
input_ids[:, -1:] , attention_mask=A_ , past_key_values=outputs_cache.past_key_values , position_ids=A_ , )
_lowerCamelCase = model(A_ )
_lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
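# The check above compares the last-token logits of an incremental (cached)
# forward pass against a single full forward pass; agreement within 1e-3
# confirms that the cache path is equivalent.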
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = 20
_lowerCamelCase = model_class_name(A_ )
_lowerCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
_lowerCamelCase = model.init_cache(input_ids.shape[0] , A_ )
_lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
_lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=A_ , past_key_values=A_ , position_ids=A_ , )
_lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
_lowerCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=A_ , position_ids=A_ , )
_lowerCamelCase = model(A_ , attention_mask=A_ )
_lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class UpperCamelCase ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = FlaxGPTJModelTester(self )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(A_ , A_ , A_ , A_ )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
A_ , A_ , A_ , A_ )
@tooslow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
_lowerCamelCase = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=A_ , truncation=A_ )
_lowerCamelCase = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
_lowerCamelCase = False
_lowerCamelCase = model.config.eos_token_id
_lowerCamelCase = jax.jit(model.generate )
_lowerCamelCase = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
_lowerCamelCase = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
_lowerCamelCase = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(A_ , A_ )
@is_pt_flax_cross_test
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
_lowerCamelCase = self._prepare_for_class(A_ , A_ )
_lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
_lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCamelCase = getattr(A_ , A_ )
_lowerCamelCase , _lowerCamelCase = pt_inputs['''input_ids'''].shape
_lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(A_ ):
_lowerCamelCase = 0
_lowerCamelCase = 1
_lowerCamelCase = 0
_lowerCamelCase = 1
_lowerCamelCase = pt_model_class(A_ ).eval()
_lowerCamelCase = model_class(A_ , dtype=jnp.floataa )
_lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , A_ )
_lowerCamelCase = fx_state
with torch.no_grad():
_lowerCamelCase = pt_model(**A_ ).to_tuple()
_lowerCamelCase = fx_model(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(A_ , A_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(A_ )
_lowerCamelCase = model_class.from_pretrained(A_ , from_pt=A_ )
_lowerCamelCase = fx_model_loaded(**A_ ).to_tuple()
self.assertEqual(
len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(A_ , A_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
_lowerCamelCase = self._prepare_for_class(A_ , A_ )
_lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
_lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCamelCase = getattr(A_ , A_ )
_lowerCamelCase = pt_model_class(A_ ).eval()
_lowerCamelCase = model_class(A_ , dtype=jnp.floataa )
_lowerCamelCase = load_flax_weights_in_pytorch_model(A_ , fx_model.params )
_lowerCamelCase , _lowerCamelCase = pt_inputs['''input_ids'''].shape
_lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(A_ ):
_lowerCamelCase = 0
_lowerCamelCase = 1
_lowerCamelCase = 0
_lowerCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
_lowerCamelCase = pt_model(**A_ ).to_tuple()
_lowerCamelCase = fx_model(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(A_ , A_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(A_ )
_lowerCamelCase = pt_model_class.from_pretrained(A_ , from_flax=A_ )
with torch.no_grad():
_lowerCamelCase = pt_model_loaded(**A_ ).to_tuple()
self.assertEqual(
len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(A_ , A_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
_lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(A_ )
| 638
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
snake_case__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
snake_case__ = [0, 25, 50]
snake_case__ = [25, 50, 75]
snake_case__ = fuzz.membership.trimf(X, abca)
snake_case__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
snake_case__ = np.ones(75)
snake_case__ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
snake_case__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
snake_case__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
snake_case__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
snake_case__ = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
snake_case__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, (µA(x) - µB(x))]
snake_case__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 638
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
snake_case__ = logging.get_logger(__name__)
snake_case__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
snake_case__ = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
snake_case__ = {
'RUCAIBox/mvp': 1024,
}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['input_ids', 'attention_mask']
A_ = MvpTokenizer
def __init__( self , A_=None , A_=None , A_=None , A_="replace" , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=False , A_=True , **A_ , ) -> Dict:
"""simple docstring"""
super().__init__(
A_ , A_ , tokenizer_file=A_ , errors=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , trim_offsets=A_ , **A_ , )
_lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , A_ ) != add_prefix_space:
_lowerCamelCase = getattr(A_ , pre_tok_state.pop('''type''' ) )
_lowerCamelCase = add_prefix_space
_lowerCamelCase = pre_tok_class(**A_ )
_lowerCamelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase = '''post_processor'''
_lowerCamelCase = getattr(self.backend_tokenizer , A_ , A_ )
if tokenizer_component_instance:
_lowerCamelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase = tuple(state['''sep'''] )
if "cls" in state:
_lowerCamelCase = tuple(state['''cls'''] )
_lowerCamelCase = False
if state.get('''add_prefix_space''' , A_ ) != add_prefix_space:
_lowerCamelCase = add_prefix_space
_lowerCamelCase = True
if state.get('''trim_offsets''' , A_ ) != trim_offsets:
_lowerCamelCase = trim_offsets
_lowerCamelCase = True
if changes_to_apply:
_lowerCamelCase = getattr(A_ , state.pop('''type''' ) )
_lowerCamelCase = component_class(**A_ )
setattr(self.backend_tokenizer , A_ , A_ )
@property
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else value
_lowerCamelCase = value
def UpperCamelCase_ ( self , *A_ , **A_ ) -> BatchEncoding:
"""simple docstring"""
_lowerCamelCase = kwargs.get('''is_split_into_words''' , A_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*A_ , **A_ )
def UpperCamelCase_ ( self , *A_ , **A_ ) -> BatchEncoding:
"""simple docstring"""
_lowerCamelCase = kwargs.get('''is_split_into_words''' , A_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*A_ , **A_ )
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
_lowerCamelCase = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
def UpperCamelCase_ ( self , A_ , A_=None ) -> int:
"""simple docstring"""
_lowerCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
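# Resulting layouts (inherited from the BART-style scheme): a single sequence
# becomes ``<s> A </s>`` and a pair becomes ``<s> A </s> </s> B </s>``.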
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 638
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda A_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 0
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def __call__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = Tracker(self.dest )(A_ ).parametrized
_lowerCamelCase = Tracker(self.src )(A_ ).parametrized
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(A_ )} operations while'
F' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
_lowerCamelCase = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
_lowerCamelCase = ResNetForImageClassification(__UpperCAmelCase ).eval()
_lowerCamelCase = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
_lowerCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
_lowerCamelCase = F'resnet{"-".join(name.split("resnet" ) )}'
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , )
# we can use the convnext one
_lowerCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = 1000
_lowerCamelCase = (1, num_labels)
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = num_labels
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
_lowerCamelCase = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
snake_case__ = parser.parse_args()
snake_case__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 638
| 1
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
snake_case__ = True
except ImportError:
snake_case__ = False
snake_case__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def __magic_name__( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=A_ , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=A_ , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self , A_ , A_ , A_=None , *A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = testing
_lowerCamelCase = testing_file
_lowerCamelCase = path
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
_lowerCamelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
_lowerCamelCase = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
_lowerCamelCase = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
_lowerCamelCase = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=A_ , extra_context=A_ , )
_lowerCamelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
_lowerCamelCase = json.load(A_ )
_lowerCamelCase = configuration['''lowercase_modelname''']
_lowerCamelCase = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(F'{directory}/configuration.json' )
_lowerCamelCase = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
_lowerCamelCase = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
_lowerCamelCase = '''Flax''' in generate_tensorflow_pytorch_and_flax
_lowerCamelCase = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ , exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , '''w''' ):
pass
shutil.move(
F'{directory}/__init__.py' , F'{model_dir}/__init__.py' , )
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' , F'{model_dir}/configuration_{lowercase_model_name}.py' , )
def remove_copy_lines(A_ ):
with open(A_ , '''r''' ) as f:
_lowerCamelCase = f.readlines()
with open(A_ , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ , A_ , A_ ):
# Create temp file
_lowerCamelCase , _lowerCamelCase = mkstemp()
_lowerCamelCase = False
with fdopen(A_ , '''w''' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
_lowerCamelCase = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A_ , A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ , A_ )
def skip_units(A_ ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ ):
with open(A_ ) as datafile:
_lowerCamelCase = []
_lowerCamelCase = False
_lowerCamelCase = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
_lowerCamelCase = line.split('''"''' )[1]
_lowerCamelCase = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
_lowerCamelCase = line.split('''"''' )[1]
_lowerCamelCase = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ , A_ , A_ )
_lowerCamelCase = []
elif "# Replace with" in line and "##" not in line:
_lowerCamelCase = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A_ )
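# For reference, the marker grammar consumed by `replace_in_files` above,
# reconstructed from its parsing logic (the target path is illustrative):
#   # To replace in: "src/transformers/__init__.py"   <- file to edit
#   # Below: "<anchor line>"                          <- insertion anchor
#   <lines accumulated for copying>
#   # End.                                            <- performs the copy
# A "# Replace with" marker clears the accumulated lines before they are used.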
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_lowerCamelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , A_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
# load decoder from hub
_lowerCamelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def UpperCamelCase_ ( self , **A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(A_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> Optional[Any]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> int:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = feature_extractor(A_ , return_tensors='''np''' )
_lowerCamelCase = processor(A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = '''This is a test string'''
_lowerCamelCase = processor(text=A_ )
_lowerCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self , A_=(2, 10, 16) , A_=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(A_ )
return np.random.rand(*A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase = processor.decode(A_ )
_lowerCamelCase = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCamelCase = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCamelCase = processor.batch_decode(A_ , A_ )
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as p:
_lowerCamelCase = decoder.decode_beams_batch(A_ , A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 15
_lowerCamelCase = -20.0
_lowerCamelCase = -4.0
_lowerCamelCase = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 2.0
_lowerCamelCase = 5.0
_lowerCamelCase = -20.0
_lowerCamelCase = True
_lowerCamelCase = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = processor_wavaveca(A_ , return_tensors='''np''' )
_lowerCamelCase = processor_auto(A_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor_wavaveca.batch_decode(A_ )
_lowerCamelCase = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def UpperCamelCase_ ( A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()[0]
_lowerCamelCase = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
import torch
_lowerCamelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A_ )
_lowerCamelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_lowerCamelCase = iter(A_ )
_lowerCamelCase = next(A_ )
_lowerCamelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_lowerCamelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCamelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_lowerCamelCase = model(A_ ).logits.cpu().numpy()
_lowerCamelCase = processor.decode(logits[0] , output_word_offsets=A_ )
_lowerCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCamelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_lowerCamelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , A_ )
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , output.text )
# output times
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''start_time''' ) )
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''end_time''' ) )
# fmt: off
_lowerCamelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCamelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
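# Note on the timestamp arithmetic in the test above: each CTC logit frame
# covers config.inputs_to_logits_ratio / sampling_rate seconds of audio
# (320 / 16000 = 0.02 s for the wav2vec2-base checkpoints, assuming the usual
# convolutional strides), so start/end offsets scale to seconds by that factor.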
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
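# The one-liner above is a quine: the lambda is handed its own source as a
# string, "%%" escapes the literal percent sign, and "%r" splices the repr of
# the string back into itself, so the printed text matches the source exactly.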
def is_subset_sum(arr: list[int], required_sum: int) -> bool:
    """
    Returns whether some subset of `arr` sums to `required_sum`.

    >>> is_subset_sum([2, 4, 6, 8], 5)
    False
    >>> is_subset_sum([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
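# A space-optimized variant (editorial sketch, not part of the original):
# row i only reads row i - 1, so a single boolean array suffices as long as
# the inner loop scans sums from high to low.
def is_subset_sum_1d(arr: list[int], required_sum: int) -> bool:
    reachable = [False] * (required_sum + 1)
    reachable[0] = True
    for value in arr:
        for j in range(required_sum, value - 1, -1):
            reachable[j] = reachable[j] or reachable[j - value]
    return reachable[required_sum]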
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
snake_case__ = logging.getLogger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , A_=-1 ) -> Dict:
"""simple docstring"""
# in NER datasets, the last column is usually reserved for NER label
_lowerCamelCase = label_idx
def UpperCamelCase_ ( self , A_ , A_ ) -> List[InputExample]:
"""simple docstring"""
if isinstance(A_ , A_ ):
_lowerCamelCase = mode.value
_lowerCamelCase = os.path.join(A_ , F'{mode}.txt' )
_lowerCamelCase = 1
_lowerCamelCase = []
with open(A_ , encoding='''utf-8''' ) as f:
_lowerCamelCase = []
_lowerCamelCase = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=A_ , labels=A_ ) )
guid_index += 1
_lowerCamelCase = []
_lowerCamelCase = []
else:
_lowerCamelCase = line.split(''' ''' )
words.append(splits[0] )
if len(A_ ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=A_ , labels=A_ ) )
return examples
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(A_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_lowerCamelCase = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(A_ )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
if path:
with open(A_ , '''r''' ) as f:
_lowerCamelCase = f.read().splitlines()
if "O" not in labels:
_lowerCamelCase = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self ) -> List[str]:
"""simple docstring"""
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
if path:
with open(A_ , '''r''' ) as f:
_lowerCamelCase = f.read().splitlines()
if "O" not in labels:
_lowerCamelCase = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def UpperCamelCase_ ( self , A_ , A_ ) -> List[InputExample]:
"""simple docstring"""
if isinstance(A_ , A_ ):
_lowerCamelCase = mode.value
_lowerCamelCase = os.path.join(A_ , F'{mode}.txt' )
_lowerCamelCase = 1
_lowerCamelCase = []
with open(A_ , encoding='''utf-8''' ) as f:
for sentence in parse_incr(A_ ):
_lowerCamelCase = []
_lowerCamelCase = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(A_ ) == len(A_ )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=A_ , labels=A_ ) )
guid_index += 1
return examples
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = 0
for sentence in parse_incr(A_ ):
_lowerCamelCase = preds_list[example_id]
_lowerCamelCase = ''''''
for token in sentence:
out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(A_ )
example_id += 1
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
if path:
with open(A_ , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
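# Minimal sketch of the column convention assumed above (hypothetical
# CoNLL-style data): each line is "<token> <POS> <chunk> <NER>", e.g.
#   "U.N. NNP I-NP I-ORG"
# The default label_idx=-1 picks the NER column ("I-ORG"), while the
# chunking subclass's label_idx=-2 picks the chunk column ("I-NP").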
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group: List[range] = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Shuffle the indices once per distinct list length, so that lists of the
    # same length (parallel data sources) are shuffled with the same permutation
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
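# A quick illustration (editorial example, not part of the module): ten shards
# split across at most three jobs produce contiguous, near-equal groups:
#   _distribute_shards(num_shards=10, max_num_jobs=3)
#   -> [range(0, 4), range(4, 7), range(7, 10)]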
def binomial_coefficient(n: int, r: int) -> int:
    # Computes C(n, r) row by row via Pascal's rule,
    # C(i, j) = C(i - 1, j) + C(i - 1, j - 1), updating a single row
    # right-to-left so each entry still holds the previous row's value
    # when it is read.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
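# Quick sanity check (an editorial addition, not in the original script):
# Pascal's-rule results satisfy the symmetry C(n, r) == C(n, n - r).
assert binomial_coefficient(n=10, r=3) == binomial_coefficient(n=10, r=7) == 120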
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 2_55 , A_=True , ) -> List[Any]:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_pad
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self , A_ , A_=False ) -> List[str]:
"""simple docstring"""
if not batched:
_lowerCamelCase = image_inputs[0]
if isinstance(A_ , Image.Image ):
_lowerCamelCase , _lowerCamelCase = image.size
else:
_lowerCamelCase , _lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase = int(self.size['''shortest_edge'''] * h / w )
_lowerCamelCase = self.size['''shortest_edge''']
elif w > h:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = self.size['''shortest_edge''']
else:
_lowerCamelCase = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase = max(A_ , key=lambda A_ : item[0] )[0]
_lowerCamelCase = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , '''image_mean''' ) )
self.assertTrue(hasattr(A_ , '''image_std''' ) )
self.assertTrue(hasattr(A_ , '''do_normalize''' ) )
self.assertTrue(hasattr(A_ , '''do_resize''' ) )
self.assertTrue(hasattr(A_ , '''size''' ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
_lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# Initialize image_processings
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase = self.image_processing_class(do_resize=A_ , do_normalize=A_ , do_rescale=A_ )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_lowerCamelCase = image_processing_a.pad(A_ , return_tensors='''pt''' )
_lowerCamelCase = image_processing_a(A_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_lowerCamelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
# prepare image, target and masks_path
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase = YolosImageProcessor(format='''coco_panoptic''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify masks
_lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A_ )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 't5'
A_ = ['past_key_values']
A_ = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , A_=3_21_28 , A_=5_12 , A_=64 , A_=20_48 , A_=6 , A_=None , A_=8 , A_=32 , A_=1_28 , A_=0.1 , A_=1E-6 , A_=1.0 , A_="relu" , A_=True , A_=True , A_=0 , A_=1 , **A_ , ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = d_kv
_lowerCamelCase = d_ff
_lowerCamelCase = num_layers
_lowerCamelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_lowerCamelCase = num_heads
_lowerCamelCase = relative_attention_num_buckets
_lowerCamelCase = relative_attention_max_distance
_lowerCamelCase = dropout_rate
_lowerCamelCase = layer_norm_epsilon
_lowerCamelCase = initializer_factor
_lowerCamelCase = feed_forward_proj
_lowerCamelCase = use_cache
_lowerCamelCase = self.feed_forward_proj.split('''-''' )
_lowerCamelCase = act_info[-1]
_lowerCamelCase = act_info[0] == '''gated'''
if len(A_ ) > 1 and act_info[0] != "gated" or len(A_ ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_lowerCamelCase = '''gelu_new'''
super().__init__(
pad_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , **A_ , )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
_lowerCamelCase = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
_lowerCamelCase = '''past_encoder_sequence + sequence'''
_lowerCamelCase = {0: '''batch'''}
_lowerCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
_lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(A_ , direction='''inputs''' )
return common_inputs
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return 13
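# Sketch of the `feed_forward_proj` parsing in the config above
# (illustrative values):
#   "relu"       -> dense activation "relu",     gated act False
#   "gated-gelu" -> dense activation "gelu_new", gated act True
#                   (remapped via the backwards-compatibility branch)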
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ = "cpu" , A_ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(A_ )
_lowerCamelCase = [0.48145466, 0.4578275, 0.40821073]
_lowerCamelCase = [0.26862954, 0.26130258, 0.27577711]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_24 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_24 )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.resize(A_ )
_lowerCamelCase = self.center_crop(A_ )
_lowerCamelCase = self.normalize(A_ )
return images
def __call__( self , A_=None , A_=None , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(text=A_ , **A_ )
_lowerCamelCase = self.preprocess_img(A_ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
            print('''Only one image found in save path (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.clip_preprocessor(text=A_ , images=A_ , return_tensors='''pt''' , padding=A_ )
_lowerCamelCase = self.clip(**A_ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , A_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , A_ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(A_ ) + torch.log(A_ )
return loss
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
wandb.log('''Original Image''' , wandb.Image(A_ ) )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
if isinstance(A_ , A_ ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A_ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(A_ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(A_ )
weights.append(A_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
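# Prompt syntax accepted by `process_prompts` above (reconstructed from the
# parsing logic; the example values are illustrative): a "|"-separated string
# such as "a smiling face:1.0|blurry:-0.5", where each entry is
# "<text>:<weight>" or a (text, weight) tuple, and entries without an
# explicit weight default to 1.0.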
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, "TrieNode"] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def __magic_name__( node , word ) -> None:
    '''simple docstring'''
    if node.is_leaf:
        print(word , end=''' ''' )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def __magic_name__( ) -> bool:
'''simple docstring'''
_lowerCamelCase = '''banana bananas bandana band apple all beast'''.split()
_lowerCamelCase = TrieNode()
root.insert_many(__UpperCAmelCase )
# print_words(root, "")
    assert all(root.find(word ) for word in words )
assert root.find('''banana''' )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
assert root.find('''apple''' )
assert root.find('''all''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def __magic_name__( msg , passes ) -> None:
    '''simple docstring'''
    print(str(msg ) , '''works!''' if passes else '''doesn\'t work :(''' )
def __magic_name__( ) -> None:
'''simple docstring'''
assert test_trie()
def __magic_name__( ) -> None:
'''simple docstring'''
print_results('''Testing trie functionality''' , test_trie() )
if __name__ == "__main__":
main()
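
# A small sketch extending the trie above with prefix search (autocomplete).
# These helpers are assumptions layered on top of the TrieNode class, not part
# of it; they rely only on its `nodes` dict and `is_leaf` flag.
def collect_words(node, prefix, out):
    # Depth-first walk gathering every complete word at or below `node`.
    if node.is_leaf:
        out.append(prefix)
    for char, child in node.nodes.items():
        collect_words(child, prefix + char, out)

def starts_with(root, prefix):
    # Descend to the node matching `prefix`, then collect all completions.
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]
    out = []
    collect_words(curr, prefix, out)
    return out

# With the words inserted in test_trie(), starts_with(root, "ban") yields
# ["banana", "bananas", "band", "bandana"] (in trie insertion order).
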
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
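
# A minimal sketch of the deferred-import idea behind _LazyModule: attribute
# access triggers the real import, so `import ...whisper` stays cheap even when
# the torch/tf/flax submodules are heavy. This illustrates the pattern only; it
# is not the actual transformers implementation.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"tokenization_whisper": ["WhisperTokenizer"]}
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        if name in self._class_to_module:
            module = importlib.import_module("." + self._class_to_module[name], self.__name__)
            value = getattr(module, name)
            setattr(self, name, value)  # cache so later lookups bypass __getattr__
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
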
snake_case__ = range(2, 20 + 1)
snake_case__ = [10**k for k in range(ks[-1] + 1)]
snake_case__ = {}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase = sum(a_i[j] for j in range(__UpperCAmelCase , len(__UpperCAmelCase ) ) )
_lowerCamelCase = sum(a_i[j] * base[j] for j in range(min(len(__UpperCAmelCase ) , __UpperCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase = 0, 0
_lowerCamelCase = n - i
_lowerCamelCase = memo.get(__UpperCAmelCase )
if sub_memo is not None:
_lowerCamelCase = sub_memo.get(__UpperCAmelCase )
if jumps is not None and len(__UpperCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase = -1
for _k in range(len(__UpperCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase = diff + c
for j in range(min(__UpperCAmelCase , len(__UpperCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase = divmod(__UpperCAmelCase , 10 )
if new_c > 0:
add(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
_lowerCamelCase = []
else:
_lowerCamelCase = {c: []}
_lowerCamelCase = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase = next_term(__UpperCAmelCase , k - 1 , i + dn , __UpperCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase = compute(__UpperCAmelCase , __UpperCAmelCase , i + dn , __UpperCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase = 0
while j < len(__UpperCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCAmelCase , (diff, dn, k) )
return (diff, dn)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(__UpperCAmelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0, 0, 0
for j in range(len(__UpperCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase = ds_c + ds_b
diff += addend
_lowerCamelCase = 0
for j in range(__UpperCAmelCase ):
_lowerCamelCase = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase = divmod(__UpperCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return diff, i - start_i
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
for j in range(__UpperCAmelCase , len(__UpperCAmelCase ) ):
_lowerCamelCase = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase = divmod(__UpperCAmelCase , 10 )
_lowerCamelCase = addend // 10 + quotient
else:
_lowerCamelCase = s
_lowerCamelCase = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase = divmod(__UpperCAmelCase , 10 )
digits.append(__UpperCAmelCase )
def __magic_name__( __UpperCAmelCase = 10**15 ) -> int:
'''simple docstring'''
_lowerCamelCase = [1]
_lowerCamelCase = 1
_lowerCamelCase = 0
while True:
_lowerCamelCase , _lowerCamelCase = next_term(__UpperCAmelCase , 20 , i + dn , __UpperCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase = 0
for j in range(len(__UpperCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
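
# A brute-force cross-check of the recurrence the memoized solution above
# accelerates: a(0) = 1 and a(n) = a(n-1) + digitsum(a(n-1)) (Project Euler 551).
# This naive version is only practical for small n, unlike the digit-jump
# caching used in solution().
def brute_force(n: int) -> int:
    a = 1
    for _ in range(n):
        a += sum(int(d) for d in str(a))
    return a

assert [brute_force(i) for i in range(9)] == [1, 2, 4, 8, 16, 23, 28, 38, 49]
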
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.mean(1 )
# Centralize the data of class i
_lowerCamelCase = data - column_reshape(__UpperCAmelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(__UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = features.mean(1 )
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.shape[1]
_lowerCamelCase = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
if features.any():
_lowerCamelCase = features.mean(1 )
# Center the dataset
_lowerCamelCase = features - np.reshape(__UpperCAmelCase , (data_mean.size, 1) )
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T ) / features.shape[1]
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(__UpperCAmelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCamelCase = np.dot(filtered_eigenvectors.T , __UpperCAmelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
_lowerCamelCase , _lowerCamelCase = eigh(
covariance_between_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , covariance_within_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , )
_lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = np.linalg.svd(__UpperCAmelCase )
_lowerCamelCase = svd_matrix[:, 0:dimensions]
_lowerCamelCase = np.dot(filtered_svd_matrix.T , __UpperCAmelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCamelCase = np.array([0, 0, 0, 1, 1] )
_lowerCamelCase = 2
_lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = linear_discriminant_analysis(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if isinstance(__UpperCAmelCase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCamelCase = 2
    _lowerCamelCase = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = principal_component_analysis(__UpperCAmelCase , __UpperCAmelCase )
if not np.allclose(__UpperCAmelCase , __UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
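
# A self-contained sketch of the PCA step above: center the data, eigendecompose
# the covariance, keep the top `dimensions` eigenvectors, project. Written with
# plain names as an illustration of the same math, not a drop-in replacement.
def pca_sketch(features: np.ndarray, dimensions: int) -> np.ndarray:
    # features is (n_features, n_samples), matching the functions above
    centered = features - features.mean(axis=1, keepdims=True)
    covariance = centered @ centered.T / features.shape[1]
    eigenvalues, eigenvectors = np.linalg.eigh(covariance)  # ascending eigenvalues
    top = eigenvectors[:, ::-1][:, :dimensions]  # largest-variance directions first
    return top.T @ centered

sample = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [0.0, 1.0, 0.0, 1.0]])
assert pca_sketch(sample, dimensions=2).shape == (2, 4)
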
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=0.6 , A_=None , ) -> Dict:
"""simple docstring"""
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = mask_ratio
_lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase = (image_size // patch_size) ** 2
_lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = ViTMAEModel(config=A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = ViTMAEForPreTraining(A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = model(A_ )
_lowerCamelCase = (self.image_size // self.patch_size) ** 2
_lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = ViTMAEForPreTraining(A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(A_ )
_lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
A_ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
A_ = False
A_ = False
A_ = False
A_ = False
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = ViTMAEModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(A_ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = torch.from_numpy(A_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase = pt_noise
super().check_pt_tf_models(A_ , A_ , A_ )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
_lowerCamelCase = outputs[0].cpu().numpy()
_lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
_lowerCamelCase = model_class.from_pretrained(A_ )
model.to(A_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
# Make sure we don't have nans
_lowerCamelCase = after_outputs[0].cpu().numpy()
_lowerCamelCase = 0
_lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1E-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = ViTMAEModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def __magic_name__( ) -> Tuple:
'''simple docstring'''
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCamelCase = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(A_ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=A_ , return_tensors='''pt''' ).to(A_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase = ViTMAEConfig()
_lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**A_ , noise=torch.from_numpy(A_ ).to(device=A_ ) )
# verify the logits
_lowerCamelCase = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , A_ )
_lowerCamelCase = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(A_ ) , atol=1E-4 ) )
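
# A small numeric sketch of the sequence-length bookkeeping the tester above
# relies on: with mask ratio r, ViTMAE keeps ceil((1 - r) * (num_patches + 1))
# tokens, the +1 accounting for the [CLS] token. Values mirror the tester
# defaults (image_size=30, patch_size=2, mask_ratio=0.6).
import math

num_patches = (30 // 2) ** 2  # 225 patches
seq_length = int(math.ceil((1 - 0.6) * (num_patches + 1)))
assert seq_length == 91  # 0.4 * 226 = 90.4, rounded up
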
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ['vqvae']
def __init__( self , A_ , A_ , A_ , A_ , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ , mel=A_ , vqvae=A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 10_00
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = 0 , A_ = None , A_ = None , A_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_lowerCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=A_ , device=self.device , )
_lowerCamelCase = noise
_lowerCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(A_ , A_ )
_lowerCamelCase = self.mel.audio_slice_to_image(A_ )
_lowerCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_lowerCamelCase = (input_image / 2_55) * 2 - 1
_lowerCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCamelCase = self.vqvae.encode(torch.unsqueeze(A_ , 0 ) ).latent_dist.sample(
generator=A_ )[0]
_lowerCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , self.scheduler.timesteps[start_step - 1] )
_lowerCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCamelCase = int(mask_start_secs * pixels_per_second )
_lowerCamelCase = int(mask_end_secs * pixels_per_second )
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNetaDConditionModel ):
_lowerCamelCase = self.unet(A_ , A_ , A_ )['''sample''']
else:
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
            if isinstance(self.scheduler , DDIMScheduler ):
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , eta=A_ , generator=A_ , )['''prev_sample''']
else:
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , generator=A_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_lowerCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCamelCase = 1 / self.vqvae.config.scaling_factor * images
_lowerCamelCase = self.vqvae.decode(A_ )['''sample''']
_lowerCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_lowerCamelCase = (images * 2_55).round().astype('''uint8''' )
_lowerCamelCase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
        _lowerCamelCase = [self.mel.image_to_audio(_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(A_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(A_ ) )
@torch.no_grad()
def UpperCamelCase_ ( self , A_ , A_ = 50 ) -> np.ndarray:
"""simple docstring"""
        assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCamelCase = (sample / 2_55) * 2 - 1
_lowerCamelCase = torch.Tensor(A_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_lowerCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCamelCase = self.scheduler.alphas_cumprod[t]
_lowerCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCamelCase = 1 - alpha_prod_t
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
_lowerCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCamelCase_ ( A_ , A_ , A_ ) -> torch.Tensor:
"""simple docstring"""
_lowerCamelCase = acos(torch.dot(torch.flatten(A_ ) , torch.flatten(A_ ) ) / torch.norm(A_ ) / torch.norm(A_ ) )
return sin((1 - alpha) * theta ) * xa / sin(A_ ) + sin(alpha * theta ) * xa / sin(A_ )
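
# A standalone sketch of the spherical interpolation (slerp) computed by the
# static method above, rewritten with plain names for readability.
import torch
from math import acos, sin

def slerp_sketch(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Angle between the two flattened tensors on the hypersphere.
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1)))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)

# alpha=0 returns x0, alpha=1 returns x1; values in between sweep along the arc.
x0, x1 = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
assert torch.allclose(slerp_sketch(x0, x1, 0.0), x0, atol=1e-6)
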
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case__ = '\nHuman: <<task>>\n\nAssistant: '
snake_case__ = 'huggingface-tools/default-prompts'
snake_case__ = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="run" ) -> Optional[Any]:
'''simple docstring'''
if prompt_or_repo_id is None:
_lowerCamelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' , __UpperCAmelCase ) is not None:
return prompt_or_repo_id
_lowerCamelCase = cached_file(
__UpperCAmelCase , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
return f.read()
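
# A minimal usage sketch for the prompt loader above (named download_prompt in
# the original transformers source): strings containing whitespace are returned
# verbatim as literal prompts, while bare identifiers are resolved as dataset
# repo ids on the Hub (which requires network access).
#
# download_prompt("Summarize <<task>> in one line", "my-agent")
#   -> "Summarize <<task>> in one line"   (returned as-is)
# download_prompt(None, "my-agent", mode="run")
#   -> contents of run_prompt_template.txt from huggingface-tools/default-prompts
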
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowercase ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A_ )
assert mmeta["long_pair"] == "heb-eng"
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCamelCase ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = AutoencoderKL
A_ = 'sample'
A_ = 1E-2
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = 4
_lowerCamelCase = 3
_lowerCamelCase = (32, 32)
_lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(A_ )
return {"sample": image}
@property
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
_lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# enable deterministic behavior for gradient checkpointing
_lowerCamelCase , _lowerCamelCase = self.prepare_init_args_and_inputs_for_common()
_lowerCamelCase = self.model_class(**A_ )
model.to(A_ )
assert not model.is_gradient_checkpointing and model.training
_lowerCamelCase = model(**A_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_lowerCamelCase = torch.randn_like(A_ )
_lowerCamelCase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_lowerCamelCase = self.model_class(**A_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(A_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_lowerCamelCase = model_a(**A_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_lowerCamelCase = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
_lowerCamelCase = dict(model.named_parameters() )
_lowerCamelCase = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(A_ )
_lowerCamelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
_lowerCamelCase = model.to(A_ )
model.eval()
if torch_device == "mps":
_lowerCamelCase = torch.manual_seed(0 )
else:
_lowerCamelCase = torch.Generator(device=A_ ).manual_seed(0 )
_lowerCamelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCamelCase = image.to(A_ )
with torch.no_grad():
_lowerCamelCase = model(A_ , sample_posterior=A_ , generator=A_ ).sample
_lowerCamelCase = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
            _lowerCamelCase = torch.tensor(
                [
                    -4.0078E-01,
                    -3.8323E-04,
                    -1.2681E-01,
                    -1.1462E-01,
                    2.0095E-01,
                    1.0893E-01,
                    -8.8247E-02,
                    -3.0361E-01,
                    -9.8644E-03,
                ] )
elif torch_device == "cpu":
_lowerCamelCase = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
_lowerCamelCase = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(A_ , A_ , rtol=1E-2 ) )
@slow
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def UpperCamelCase_ ( self , seed , shape ) -> str:
        """simple docstring"""
        return F'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self , A_=0 , A_=(4, 3, 5_12, 5_12) , A_=False ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = torch.floataa if fpaa else torch.floataa
_lowerCamelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(A_ , A_ ) ) ).to(A_ ).to(A_ )
return image
def UpperCamelCase_ ( self , A_="CompVis/stable-diffusion-v1-4" , A_=False ) -> Any:
"""simple docstring"""
_lowerCamelCase = '''fp16''' if fpaa else None
_lowerCamelCase = torch.floataa if fpaa else torch.floataa
_lowerCamelCase = AutoencoderKL.from_pretrained(
A_ , subfolder='''vae''' , torch_dtype=A_ , revision=A_ , )
model.to(A_ ).eval()
return model
def UpperCamelCase_ ( self , A_=0 ) -> int:
"""simple docstring"""
if torch_device == "mps":
return torch.manual_seed(A_ )
return torch.Generator(device=A_ ).manual_seed(A_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_sd_vae_model()
_lowerCamelCase = self.get_sd_image(A_ )
_lowerCamelCase = self.get_generator(A_ )
with torch.no_grad():
_lowerCamelCase = model(A_ , generator=A_ , sample_posterior=A_ ).sample
assert sample.shape == image.shape
_lowerCamelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCamelCase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(A_ , A_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def UpperCamelCase_ ( self , A_ , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = self.get_sd_vae_model(fpaa=A_ )
_lowerCamelCase = self.get_sd_image(A_ , fpaa=A_ )
_lowerCamelCase = self.get_generator(A_ )
with torch.no_grad():
_lowerCamelCase = model(A_ , generator=A_ , sample_posterior=A_ ).sample
assert sample.shape == image.shape
_lowerCamelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCamelCase = torch.tensor(A_ )
assert torch_all_close(A_ , A_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_sd_vae_model()
_lowerCamelCase = self.get_sd_image(A_ )
with torch.no_grad():
_lowerCamelCase = model(A_ ).sample
assert sample.shape == image.shape
_lowerCamelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCamelCase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(A_ , A_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def UpperCamelCase_ ( self , A_ , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_sd_vae_model()
_lowerCamelCase = self.get_sd_image(A_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCamelCase = model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
_lowerCamelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
_lowerCamelCase = torch.tensor(A_ )
assert torch_all_close(A_ , A_ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def UpperCamelCase_ ( self , A_ , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = self.get_sd_vae_model(fpaa=A_ )
_lowerCamelCase = self.get_sd_image(A_ , shape=(3, 4, 64, 64) , fpaa=A_ )
with torch.no_grad():
_lowerCamelCase = model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
_lowerCamelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCamelCase = torch.tensor(A_ )
assert torch_all_close(A_ , A_ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def UpperCamelCase_ ( self , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_sd_vae_model(fpaa=A_ )
_lowerCamelCase = self.get_sd_image(A_ , shape=(3, 4, 64, 64) , fpaa=A_ )
with torch.no_grad():
_lowerCamelCase = model.decode(A_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCamelCase = model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(A_ , A_ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_sd_vae_model()
_lowerCamelCase = self.get_sd_image(A_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCamelCase = model.decode(A_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCamelCase = model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(A_ , A_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def UpperCamelCase_ ( self , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_sd_vae_model()
_lowerCamelCase = self.get_sd_image(A_ )
_lowerCamelCase = self.get_generator(A_ )
with torch.no_grad():
_lowerCamelCase = model.encode(A_ ).latent_dist
_lowerCamelCase = dist.sample(generator=A_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_lowerCamelCase = sample[0, -1, -3:, -3:].flatten().cpu()
_lowerCamelCase = torch.tensor(A_ )
_lowerCamelCase = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(A_ , A_ , atol=A_ )
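
# A toy version of the save/load round-trip pattern used throughout the tests
# above: with inputs held fixed, outputs before and after serialization must
# agree to numerical precision. The module here is a stand-in, not a VAE.
import tempfile
import torch
from torch import nn

toy = nn.Linear(4, 2)
x = torch.randn(1, 4)
with torch.no_grad():
    before = toy(x)
with tempfile.TemporaryDirectory() as tmpdir:
    torch.save(toy.state_dict(), f"{tmpdir}/toy.pt")
    reloaded = nn.Linear(4, 2)
    reloaded.load_state_dict(torch.load(f"{tmpdir}/toy.pt"))
with torch.no_grad():
    after = reloaded(x)
assert torch.allclose(before, after, atol=1e-5)
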
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase = ''''''
else:
_lowerCamelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = dct.pop(__UpperCAmelCase )
_lowerCamelCase = val
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True ) -> str:
'''simple docstring'''
_lowerCamelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCamelCase = 8
# set labels if required
if not base_model:
_lowerCamelCase = 1000
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCamelCase = 384
_lowerCamelCase = 1536
_lowerCamelCase = 12
_lowerCamelCase = 6
# load original model from torch hub
_lowerCamelCase = torch.hub.load('''facebookresearch/dino:main''' , __UpperCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__UpperCAmelCase )
_lowerCamelCase = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# load HuggingFace model
if base_model:
_lowerCamelCase = ViTModel(__UpperCAmelCase , add_pooling_layer=__UpperCAmelCase ).eval()
else:
_lowerCamelCase = ViTForImageClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCamelCase = ViTImageProcessor()
_lowerCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowerCamelCase = encoding['''pixel_values''']
_lowerCamelCase = model(__UpperCAmelCase )
if base_model:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCAmelCase , outputs.logits , atol=1E-3 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
snake_case__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
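
# A standalone sketch of the fused-QKV split performed by read_in_q_k_v above:
# timm stores the attention projection as one (3 * hidden, hidden) matrix, while
# the HF ViT model expects separate query/key/value weights.
import torch

hidden_size = 8  # illustrative; DINO ViT-B uses 768
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
value_w = in_proj_weight[-hidden_size:, :]
query_b = in_proj_bias[:hidden_size]
key_b = in_proj_bias[hidden_size : hidden_size * 2]
value_b = in_proj_bias[-hidden_size:]
assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)
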
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 3 , A_ = 3 , A_ = ("DownEncoderBlock2D",) , A_ = ("UpDecoderBlock2D",) , A_ = (64,) , A_ = 1 , A_ = "silu" , A_ = 3 , A_ = 32 , A_ = 2_56 , A_ = 32 , A_ = None , A_ = 0.18215 , A_ = "group" , ) -> Any:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
_lowerCamelCase = Encoder(
in_channels=A_ , out_channels=A_ , down_block_types=A_ , block_out_channels=A_ , layers_per_block=A_ , act_fn=A_ , norm_num_groups=A_ , double_z=A_ , )
_lowerCamelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
_lowerCamelCase = nn.Convad(A_ , A_ , 1 )
_lowerCamelCase = VectorQuantizer(A_ , A_ , beta=0.25 , remap=A_ , sane_index_shape=A_ )
_lowerCamelCase = nn.Convad(A_ , A_ , 1 )
# pass init params to Decoder
_lowerCamelCase = Decoder(
in_channels=A_ , out_channels=A_ , up_block_types=A_ , block_out_channels=A_ , layers_per_block=A_ , act_fn=A_ , norm_num_groups=A_ , norm_type=A_ , )
@apply_forward_hook
def UpperCamelCase_ ( self , A_ , A_ = True ) -> VQEncoderOutput:
"""simple docstring"""
_lowerCamelCase = self.encoder(A_ )
_lowerCamelCase = self.quant_conv(A_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=A_ )
@apply_forward_hook
def UpperCamelCase_ ( self , A_ , A_ = False , A_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
# also go through quantization layer
if not force_not_quantize:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.quantize(A_ )
else:
_lowerCamelCase = h
_lowerCamelCase = self.post_quant_conv(A_ )
_lowerCamelCase = self.decoder(A_ , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A_ )
def UpperCamelCase_ ( self , A_ , A_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
_lowerCamelCase = sample
_lowerCamelCase = self.encode(A_ ).latents
_lowerCamelCase = self.decode(A_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=A_ )
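
# A minimal sketch of the nearest-codebook lookup the VectorQuantizer above
# performs between quant_conv and post_quant_conv: every latent vector is
# replaced by its closest codebook entry. The real module additionally adds a
# commitment loss and a straight-through gradient estimator.
import torch

num_embeddings, embed_dim = 256, 4
codebook = torch.randn(num_embeddings, embed_dim)
latents = torch.randn(10, embed_dim)

distances = torch.cdist(latents, codebook)  # (10, 256) pairwise L2 distances
indices = distances.argmin(dim=1)           # nearest code index per latent
quantized = codebook[indices]               # (10, 4) quantized latents
assert quantized.shape == latents.shape
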
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
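# For orientation (derived from the parsing above, not extra behavior): fairseq
# feature-extractor keys look like "conv_layers.<layer_id>.<type_id>.<param>",
# e.g. "conv_layers.0.0.weight" -> layer_id=0, type_id=0 (the conv itself) and
# "conv_layers.0.2.weight" -> layer_id=0, type_id=2 (the layer/group norm),
# which is exactly what int(items[0]) / int(items[1]) recover.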
@torch.no_grad()
def convert_unispeech_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path )
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 42
            vocab_dict['''<s>'''] = 43
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )

        hf_unispeech = UniSpeechForCTC(config )
    else:
        hf_unispeech = UniSpeechForPreTraining(config )

    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )

    model = model[0].eval()

    recursively_load_weights(model , hf_unispeech , is_finetuned )

    hf_unispeech.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
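# Example invocation (hypothetical paths and script name, for illustration):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-converted
#
# Pass --not_finetuned for pretraining-only checkpoints so the quantizer and
# projection heads are kept; omit it for fine-tuned (CTC) checkpoints.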
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultiNodeTest( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        if self.framework == "pytorch":
            subprocess.run(
                F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=True , )

        assert hasattr(self , '''env''' )
    def create_estimator( self , instance_count ):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            '''enabled''': True,
            '''processes_per_host''': 8,
        }
        smp_options = {
            '''enabled''': True,
            '''parameters''': {
                '''microbatches''': 4,
                '''placement_strategy''': '''spread''',
                '''pipeline''': '''interleaved''',
                '''optimize''': '''speed''',
                '''partitions''': 4,
                '''ddp''': True,
            },
        }

        distribution = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}

        name_extension = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
                **self.env.hyperparameters,
                '''model_name_or_path''': self.model_name_or_path,
                '''max_steps''': 5_00,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='''py36''' , )
    def save_results_as_csv( self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' )
    @parameterized.expand([(1,)] )
    def test_script( self , instance_count ):
        # create estimator
        estimator = self.create_estimator(instance_count )

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )

        # dump tests result into json file to share in PR
        with open(F'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)
class DPTFeatureExtractor( DPTImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> Any:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> int:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> Dict:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> Any:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> str:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> Dict:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> Any:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> Any:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> int:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> int:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> int:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> int:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class UpperCamelCase ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self , *A_ , **A_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase_ ( cls , *A_ , **A_ ) -> str:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
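# Behavior sketch (illustrative; `SomeFlaxDummy` is a hypothetical stand-in for
# any of the classes above): with DummyObject as metaclass, importing a dummy
# always succeeds, but instantiating it or calling one of its classmethods
# without `flax` installed raises an ImportError naming the missing backend:
#
#   SomeFlaxDummy()                    # ImportError: ... requires the flax library
#   SomeFlaxDummy.some_classmethod()   # same ImportError, raised lazily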
import argparse
import json
import subprocess
def get_runner_status( target_runners , token ):
    offline_runners = []
    cmd = (
        F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        ''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode('''utf-8''' )
    status = json.loads(o )

    runners = status['''runners''']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )

    # save the result so we can report them on Slack
    with open('''offline_runners.txt''' , '''w''' ) as fp:
        fp.write(json.dumps(offline_runners ) )

    if len(offline_runners ) > 0:
        failed = '''\n'''.join([x['''name'''] for x in offline_runners] )
        raise ValueError(F'The following runners are offline:\n{failed}' )
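# For reference (shape based on the public GitHub REST API documentation, not
# on anything in this script): the runners endpoint returns JSON roughly like
#
#   {"total_count": 2,
#    "runners": [{"id": 1, "name": "gpu-runner-1", "os": "linux",
#                 "status": "online", "busy": false, "labels": [...]},
#                {"id": 2, "name": "gpu-runner-2", "os": "linux",
#                 "status": "offline", "busy": false, "labels": [...]}]}
#
# so the loop above only needs the "name" and "status" fields.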
if __name__ == "__main__":
    def list_str( values ):
        return values.split(''',''' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'cls_token': '<s>'}
    def setUp( self ):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def roberta_dict_integration_testing( self ):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=True ) , [0, 3_14_14, 2_32, 3_28, 2] )
        self.assertListEqual(
            tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=True ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('''roberta-base''' )

        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )

        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding( self ):
        tokenizer = self.get_tokenizer()

        sequence = '''Encode this sequence.'''
        space_encoding = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )

        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )

        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )

        # Testing spaces after special tokens
        mask = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )

        sequence = '''Encode <mask> sequence'''
        sequence_nospace = '''Encode <mask>sequence'''

        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )

        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
    def test_pretokenized_inputs( self ):
        pass
    def test_embeded_special_tokens( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )

                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )

                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
    def test_change_add_prefix_space_and_trim_offsets_args( self ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )

            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , add_prefix_space )

            self.assertEqual(post_processor_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''trim_offsets'''] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments( self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                text = F' {text}'

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
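    # Summary of the combinations exercised above (illustrative): `add_prefix_space`
    # decides whether a space is prepended before tokenization, and `trim_offsets`
    # decides whether the separating space is excluded from the reported offsets --
    # hence spans starting at the space when trim_offsets=False versus starting
    # right after it when trim_offsets=True.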
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerModel(config=config )

        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs )

        result = model(input_ids )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        prediction_scores = model(inputs )['''logits''']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }

        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }

        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp( self ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        model = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_inference_masked_lm( self ):
        model = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]

        # TODO Replace vocab size
        vocab_size = 5_00_00

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )

        print(output[:, :3, :3] )

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest( unittest.TestCase ):
    tolerance = 1E-4
    def test_basic( self ):
        input_ids = tf.constant([[4, 10]] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb = emb1(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer( self ):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
        emb1([2, 16, 5_12] )
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest( unittest.TestCase ):
    tolerance = 1E-4
    def test_apply_rotary_position_embeddings( self ):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 1_00
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 1_00
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 7_68] )[None, None, :, :]

        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        desired_query_layer = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        desired_key_layer = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , desired_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , desired_key_layer , atol=self.tolerance )
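# For intuition (illustrative, not part of the assertions): the rotary scheme
# rotates each (even, odd) feature pair of the query/key by a position-dependent
# angle, i.e. with interleaved sin/cos it computes
#
#   rotate_half(x) = interleave(-x_odd, x_even)
#   x_rotated      = x * cos_pos + rotate_half(x) * sin_pos
#
# which is what apply_rotary_position_embeddings applies to the tensors checked
# above.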
def binary_multiply( a , b ) -> int:
    '''
    Multiply a and b with the Russian peasant (binary) method:
    add `a` for every set bit of `b`, doubling `a` at each step.
    '''
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply( a , b , modulus ) -> int:
    '''
    Same as binary_multiply, but every addition is reduced modulo `modulus`,
    so intermediate values stay small.
    '''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
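# Usage sketch (illustrative):
#
#   binary_multiply(5, 6)         # 6 = 0b110 -> res = 10 + 20 = 30
#   binary_mod_multiply(5, 6, 7)  # same additions reduced mod 7 -> 2
#
# Both loops run in O(log b) iterations, one per bit of b.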
def jaro_winkler( stra , strb ) -> float:
    '''
    Jaro-Winkler similarity between two strings.

    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    '''

    def get_matched_characters( _stra , _strb ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = F'{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}'

        return "".join(matched )

    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
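# Worked example (illustrative): for "hello" vs. "world" the match window
# radius is min(5, 5) // 2 = 2 and the only matched character is "l", so
# match_count = 1 and transpositions = 0, giving
#   jaro = (1/5 + 1/5 + 1/1) / 3 ≈ 0.4667
# and, with no common prefix, jaro_winkler returns the same value.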
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs( model ):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
            model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )

    return encoder_config, decoder_config
def rename_key( name ):
    if "encoder.model" in name:
        name = name.replace('''encoder.model''' , '''encoder''' )
    if "decoder.model" in name:
        name = name.replace('''decoder.model''' , '''decoder''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if name.startswith('''encoder''' ):
        if "layers" in name:
            name = '''encoder.''' + name
        if "attn.proj" in name:
            name = name.replace('''attn.proj''' , '''attention.output.dense''' )
        if "attn" in name and "mask" not in name:
            name = name.replace('''attn''' , '''attention.self''' )
        if "norm1" in name:
            name = name.replace('''norm1''' , '''layernorm_before''' )
        if "norm2" in name:
            name = name.replace('''norm2''' , '''layernorm_after''' )
        if "mlp.fc1" in name:
            name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
        if "mlp.fc2" in name:
            name = name.replace('''mlp.fc2''' , '''output.dense''' )

        if name == "encoder.norm.weight":
            name = '''encoder.layernorm.weight'''
        if name == "encoder.norm.bias":
            name = '''encoder.layernorm.bias'''

    return name
def convert_state_dict( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key )] = val

    return orig_state_dict
def convert_donut_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    # load original model
    original_model = DonutModel.from_pretrained(model_name ).eval()

    # load HuggingFace model
    encoder_config , decoder_config = get_configs(original_model )
    encoder = DonutSwinModel(encoder_config )
    decoder = MBartForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )

    # verify results on scanned document
    dataset = load_dataset('''hf-internal-testing/example-documents''' )
    image = dataset['''test'''][0]['''image'''].convert('''RGB''' )

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name , from_slow=True )
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    processor = DonutProcessor(image_processor , tokenizer )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        question = '''When is the coffee break?'''
        task_prompt = task_prompt.replace('''{user_input}''' , question )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = '''<s_rvlcdip>'''
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = '''<s_cord>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = '''s_cord-v2>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = '''<s_zhtrainticket>'''
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = '''hello world'''
    else:
        raise ValueError('''Model name not supported''' )
    prompt_tensors = original_model.decoder.tokenizer(task_prompt , add_special_tokens=False , return_tensors='''pt''' )[
        '''input_ids'''
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values )
    patch_embeddings , _ = model.encoder.embeddings(pixel_values )
    assert torch.allclose(original_patch_embed , patch_embeddings , atol=1E-3 )

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values )
    last_hidden_state = model.encoder(pixel_values ).last_hidden_state
    assert torch.allclose(original_last_hidden_state , last_hidden_state , atol=1E-2 )

    # verify decoder hidden states
    original_logits = original_model(pixel_values , prompt_tensors , None ).logits
    logits = model(pixel_values , decoder_input_ids=prompt_tensors ).logits
    assert torch.allclose(original_logits , logits , atol=1E-3 )
    print('''Looks ok!''' )

    if pytorch_dump_folder_path is not None:
        print(F'Saving model and processor to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
        processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        '''Test ``_convert_token_to_id`` and ``_convert_id_to_token``.'''
        token = '''<pad>'''
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(len(vocab_keys ) , 10_08 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_08 )
    def test_full_tokenizer( self ):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )

        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer( self ):
        return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
    def test_picklable_without_disk( self ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = '''I was born in 92000, and this is falsé.'''

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [2, 3_12_27, 44_47, 35]

        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
        )
        # fmt: off
        original_tokenizer_encodings = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self ):
        # fmt: off
        expected_encoding = {
'''input_ids''': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''facebook/xglm-564M''' , padding=False , )
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)