code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowercase ( unittest.TestCase ):
def A__ ( self):
lowercase = tempfile.mkdtemp()
# fmt: off
lowercase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
lowercase = dict(zip(A__ ,range(len(A__))))
lowercase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
lowercase = {'''unk_token''': '''<unk>'''}
lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''])
lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as fp:
fp.write(json.dumps(A__) + '''\n''')
with open(self.merges_file ,'''w''' ,encoding='''utf-8''') as fp:
fp.write('''\n'''.join(A__))
lowercase = {
'''do_resize''': True,
'''size''': 2_0,
'''do_center_crop''': True,
'''crop_size''': 1_8,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
lowercase = os.path.join(self.tmpdirname ,A__)
with open(self.image_processor_file ,'''w''' ,encoding='''utf-8''') as fp:
json.dump(A__ ,A__)
def A__ ( self ,**A__):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**A__)
def A__ ( self ,**A__):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**A__)
def A__ ( self ,**A__):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**A__)
def A__ ( self):
shutil.rmtree(self.tmpdirname)
def A__ ( self):
lowercase = [np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uinta)]
lowercase = [Image.fromarray(np.moveaxis(A__ ,0 ,-1)) for x in image_inputs]
return image_inputs
def A__ ( self):
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
lowercase = self.get_image_processor()
lowercase = CLIPProcessor(tokenizer=A__ ,image_processor=A__)
processor_slow.save_pretrained(self.tmpdirname)
lowercase = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=A__)
lowercase = CLIPProcessor(tokenizer=A__ ,image_processor=A__)
processor_fast.save_pretrained(self.tmpdirname)
lowercase = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer ,A__)
self.assertIsInstance(processor_fast.tokenizer ,A__)
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor ,A__)
self.assertIsInstance(processor_fast.image_processor ,A__)
def A__ ( self):
lowercase = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
lowercase = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''')
lowercase = self.get_image_processor(do_normalize=A__ ,padding_value=1.0)
lowercase = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=A__ ,padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer ,A__)
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor ,A__)
def A__ ( self):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = CLIPProcessor(tokenizer=A__ ,image_processor=A__)
lowercase = self.prepare_image_inputs()
lowercase = image_processor(A__ ,return_tensors='''np''')
lowercase = processor(images=A__ ,return_tensors='''np''')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2)
def A__ ( self):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = CLIPProcessor(tokenizer=A__ ,image_processor=A__)
lowercase = '''lower newer'''
lowercase = processor(text=A__)
lowercase = tokenizer(A__)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key])
def A__ ( self):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = CLIPProcessor(tokenizer=A__ ,image_processor=A__)
lowercase = '''lower newer'''
lowercase = self.prepare_image_inputs()
lowercase = processor(text=A__ ,images=A__)
self.assertListEqual(list(inputs.keys()) ,['''input_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(A__):
processor()
def A__ ( self):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = CLIPProcessor(tokenizer=A__ ,image_processor=A__)
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.batch_decode(A__)
lowercase = tokenizer.batch_decode(A__)
self.assertListEqual(A__ ,A__)
def A__ ( self):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = CLIPProcessor(tokenizer=A__ ,image_processor=A__)
lowercase = '''lower newer'''
lowercase = self.prepare_image_inputs()
lowercase = processor(text=A__ ,images=A__)
self.assertListEqual(list(inputs.keys()) ,processor.model_input_names)
| 101
|
'''simple docstring'''
def lowercase__ ( __UpperCamelCase = 1000 )-> int:
UpperCamelCase = -1
UpperCamelCase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
UpperCamelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
UpperCamelCase = n - a - b
if c * c == (a * a + b * b):
UpperCamelCase = a * b * c
if candidate >= product:
UpperCamelCase = candidate
return product
if __name__ == "__main__":
print(f'{solution() = }')
| 321
| 0
|
"""simple docstring"""
import sys
def lowerCamelCase (a_ :Dict) -> Optional[Any]:
lowercase :Optional[Any] = len(a_)
lowercase :Union[str, Any] = [[0 for x in range(a_)] for x in range(a_)]
lowercase :int = [[0 for x in range(a_)] for x in range(a_)]
for chain_length in range(2 , a_):
for a in range(1 , n - chain_length + 1):
lowercase :Dict = a + chain_length - 1
lowercase :List[str] = sys.maxsize
for c in range(a_ , a_):
lowercase :Any = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowercase :Any = cost
lowercase :Any = c
return matrix, sol
def lowerCamelCase (a_ :Tuple , a_ :List[Any] , a_ :Dict) -> Optional[int]:
if i == j:
print('''A''' + str(a_) , end=''' ''')
else:
print('''(''' , end=''' ''')
print_optiomal_solution(a_ , a_ , optimal_solution[i][j])
print_optiomal_solution(a_ , optimal_solution[i][j] + 1 , a_)
print(''')''' , end=''' ''')
def lowerCamelCase () -> Optional[Any]:
lowercase :Optional[Any] = [30, 35, 15, 5, 10, 20, 25]
lowercase :Dict = len(a_)
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
lowercase :List[str] = matrix_chain_order(a_)
print('''No. of Operation required: ''' + str(matrix[1][n - 1]))
print_optiomal_solution(a_ , 1 , n - 1)
if __name__ == "__main__":
main()
| 357
|
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCAmelCase = logging.get_logger(__name__)
def lowerCamelCase (a_ :str , a_ :Optional[int]) -> Union[str, Any]:
lowercase :List[str] = set()
lowercase :Dict = []
def parse_line(a_ :Dict):
for line in fp:
if isinstance(a_ , a_):
lowercase :Any = line.decode('''UTF-8''')
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(''' '''):
# process a single warning and move it to `selected_warnings`.
if len(a_) > 0:
lowercase :int = '''\n'''.join(a_)
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets):
selected_warnings.add(a_)
buffer.clear()
continue
else:
lowercase :Any = line.strip()
buffer.append(a_)
if from_gh:
for filename in os.listdir(a_):
lowercase :Optional[int] = os.path.join(a_ , a_)
if not os.path.isdir(a_):
# read the file
if filename != "warnings.txt":
continue
with open(a_) as fp:
parse_line(a_)
else:
try:
with zipfile.ZipFile(a_) as z:
for filename in z.namelist():
if not os.path.isdir(a_):
# read the file
if filename != "warnings.txt":
continue
with z.open(a_) as fp:
parse_line(a_)
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""")
return selected_warnings
def lowerCamelCase (a_ :Any , a_ :Optional[int]) -> Any:
lowercase :Tuple = set()
lowercase :Dict = [os.path.join(a_ , a_) for p in os.listdir(a_) if (p.endswith('''.zip''') or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(a_ , a_))
return selected_warnings
if __name__ == "__main__":
def lowerCamelCase (a_ :List[Any]) -> Optional[Any]:
return values.split(''',''')
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCAmelCase = extract_warnings(args.output_dir, args.targets)
UpperCAmelCase = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 172
| 0
|
def lowerCAmelCase__ ( a__: int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
'''simple docstring'''
try:
_UpperCAmelCase = int(a__ )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
_UpperCAmelCase = 1
_UpperCAmelCase = 2
while i * i <= n:
while n % i == 0:
_UpperCAmelCase = i
n //= i
i += 1
if n > 1:
_UpperCAmelCase = n
return int(a__ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 329
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ :str = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Union[str, Any] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 329
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
__A : List[Any] = parser.parse_args()
if args.model_type == "roberta":
__A : List[str] = RobertaForMaskedLM.from_pretrained(args.model_name)
__A : Tuple = "roberta"
elif args.model_type == "gpt2":
__A : Dict = GPTaLMHeadModel.from_pretrained(args.model_name)
__A : Optional[int] = "transformer"
__A : Any = model.state_dict()
__A : List[Any] = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__A : int = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__A : List[Any] = F'''{prefix}.embeddings.{w}.weight'''
__A : Any = state_dict[param_name]
for w in ["weight", "bias"]:
__A : int = F'''{prefix}.embeddings.LayerNorm.{w}'''
__A : List[Any] = state_dict[param_name]
# Transformer Blocks #
__A : Optional[int] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
__A : Any = state_dict[
F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
__A : Any = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
__A : List[Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
__A : Any = state_dict[F'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__A : Any = state_dict[F'''lm_head.dense.{w}''']
__A : str = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
__A : Optional[Any] = state_dict[F'''{prefix}.ln_f.{w}''']
__A : Dict = state_dict["lm_head.weight"]
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 89
|
'''simple docstring'''
import requests
def UpperCamelCase_ ( A__ : str , A__ : str ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = {"""Content-Type""": """application/json"""}
lowerCAmelCase_ : Union[str, Any] = requests.post(A__ , json={"""text""": message_body} , headers=A__ )
if response.status_code != 2_00:
lowerCAmelCase_ : Dict = (
"""Request to slack returned an error """
f'{response.status_code}, the response is:\n{response.text}'
)
raise ValueError(A__ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 89
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : str = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 332
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Dict , _lowercase : Union[str, Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__UpperCAmelCase = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_lowercase )
def a ( self : str ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : List[str] ):
__UpperCAmelCase = '''sgugger/tiny-distilbert-classification'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : str ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , torchscript=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , fpaa=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : int ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
# set architectures equal to `None`
__UpperCAmelCase = None
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Tuple ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowercase , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Any ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : str ):
__UpperCAmelCase = '''sshleifer/tinier_bart'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : int ):
__UpperCAmelCase = '''sshleifer/tinier_bart'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , save_to_csv=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowercase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowercase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowercase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowercase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowercase , '''env.csv''' ) , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowercase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''env.csv''' ) ).exists() )
def a ( self : List[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_lowercase : str ):
self.assertTrue(hasattr(_lowercase , '''sequential''' ) )
self.assertTrue(hasattr(_lowercase , '''cumulative''' ) )
self.assertTrue(hasattr(_lowercase , '''current''' ) )
self.assertTrue(hasattr(_lowercase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowercase , '''log.txt''' ) , log_print=_lowercase , trace_memory_line_by_line=_lowercase , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowercase , '''log.txt''' ) ).exists() )
| 332
| 1
|
import os
from datetime import datetime as dt
from github import Github
_A = [
'good first issue',
'feature request',
'wip',
]
def _UpperCAmelCase ( ):
__UpperCamelCase =Github(os.environ['GITHUB_TOKEN'] )
__UpperCamelCase =g.get_repo('huggingface/accelerate' )
__UpperCamelCase =repo.get_issues(state='open' )
for issue in open_issues:
__UpperCamelCase =sorted([comment for comment in issue.get_comments()] , key=lambda SCREAMING_SNAKE_CASE__ : i.created_at , reverse=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =comments[0] if len(SCREAMING_SNAKE_CASE__ ) > 0 else None
__UpperCamelCase =dt.utcnow()
__UpperCamelCase =(current_time - issue.updated_at).days
__UpperCamelCase =(current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 117
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_A = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , A_ ) -> Optional[int]:
super().__init__()
__UpperCamelCase =torchvision.models.resnetaaa(pretrained=A_ )
__UpperCamelCase =list(model.children() )[:-2]
__UpperCamelCase =nn.Sequential(*A_ )
__UpperCamelCase =nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self , A_ ) -> int:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
__UpperCamelCase =self.pool(self.model(A_ ) )
__UpperCamelCase =torch.flatten(A_ , start_dim=2 )
__UpperCamelCase =out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
__UpperCamelCase =[json.loads(A_ ) for l in open(A_ )]
__UpperCamelCase =os.path.dirname(A_ )
__UpperCamelCase =tokenizer
__UpperCamelCase =labels
__UpperCamelCase =len(A_ )
__UpperCamelCase =max_seq_length
__UpperCamelCase =transforms
def __len__( self ) -> Any:
return len(self.data )
def __getitem__( self , A_ ) -> Union[str, Any]:
__UpperCamelCase =torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=A_ ) )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =sentence[0], sentence[1:-1], sentence[-1]
__UpperCamelCase =sentence[: self.max_seq_length]
__UpperCamelCase =torch.zeros(self.n_classes )
__UpperCamelCase =1
__UpperCamelCase =Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
__UpperCamelCase =self.transforms(A_ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self ) -> List[str]:
__UpperCamelCase =Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =[len(row['sentence'] ) for row in batch]
__UpperCamelCase , __UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ), max(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=torch.long )
__UpperCamelCase =torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =input_row['sentence']
__UpperCamelCase =1
__UpperCamelCase =torch.stack([row['image'] for row in batch] )
__UpperCamelCase =torch.stack([row['label'] for row in batch] )
__UpperCamelCase =torch.stack([row['image_start_token'] for row in batch] )
__UpperCamelCase =torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _UpperCAmelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _UpperCAmelCase ( ):
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
| 117
| 1
|
from __future__ import annotations
from typing import Any
class UpperCAmelCase :
def __init__(self : str , snake_case__ : int ) -> None:
'''simple docstring'''
snake_case : str = num_of_nodes
snake_case : list[list[int]] = []
snake_case : dict[int, int] = {}
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : int ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : int ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
snake_case : Union[str, Any] = self.find_component(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : list[int] , snake_case__ : int , snake_case__ : int ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
snake_case : Any = v_node
component_size[v_node] += component_size[u_node]
self.set_component(snake_case__ )
elif component_size[u_node] >= component_size[v_node]:
snake_case : List[Any] = self.find_component(snake_case__ )
component_size[u_node] += component_size[v_node]
self.set_component(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> None:
'''simple docstring'''
snake_case : Dict = []
snake_case : Any = 0
snake_case : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
snake_case : str = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
snake_case , snake_case , snake_case : Dict = edge
snake_case : List[str] = self.m_component[u]
snake_case : Union[str, Any] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
snake_case : Optional[Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(snake_case__ , snake_case__ ):
snake_case , snake_case , snake_case : List[str] = edge
snake_case : Tuple = self.m_component[u]
snake_case : Dict = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(snake_case__ , snake_case__ , snake_case__ )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
snake_case : Any = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def UpperCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59
|
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
if len(lowerCamelCase_ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
_lowercase : Tuple = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21
| 0
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
UpperCamelCase_ =1.054571817e-34 # unit of ℏ : J * s
UpperCamelCase_ =3e8 # unit of c : m * s^-1
def a_ ( _lowercase , _lowercase , _lowercase ):
if (force, area, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if force < 0:
raise ValueError('''Magnitude of force can not be negative''' )
if distance < 0:
raise ValueError('''Distance can not be negative''' )
if area < 0:
raise ValueError('''Area can not be negative''' )
if force == 0:
_UpperCamelCase : str = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_UpperCamelCase : str = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_UpperCamelCase : Tuple = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('''One and only one argument must be 0''' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
UpperCamelCase = AltDiffusionPipeline
UpperCamelCase = TEXT_TO_IMAGE_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self : int ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : Dict = UNetaDConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=3_2, )
_UpperCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=lowerCAmelCase__, set_alpha_to_one=lowerCAmelCase__, )
torch.manual_seed(0 )
_UpperCamelCase : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
_UpperCamelCase : str = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, projection_dim=3_2, intermediate_size=3_7, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5_0_0_2, )
_UpperCamelCase : List[Any] = CLIPTextModel(lowerCAmelCase__ )
_UpperCamelCase : Dict = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_UpperCamelCase : str = 7_7
_UpperCamelCase : int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case ( self : Dict, lowerCAmelCase__ : Any, lowerCAmelCase__ : int=0 ) -> Optional[int]:
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith('''mps''' ):
_UpperCamelCase : Any = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCamelCase : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCamelCase : str = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case ( self : List[Any] ) -> List[str]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case ( self : List[Any] ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : int = self.get_dummy_components()
torch.manual_seed(0 )
_UpperCamelCase : Any = RobertaSeriesConfig(
hidden_size=3_2, project_dim=3_2, intermediate_size=3_7, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, vocab_size=5_0_0_2, )
# TODO: remove after fixing the non-deterministic text encoder
_UpperCamelCase : Tuple = RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
_UpperCamelCase : str = text_encoder
_UpperCamelCase : List[Any] = AltDiffusionPipeline(**lowerCAmelCase__ )
_UpperCamelCase : List[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = '''A photo of an astronaut'''
_UpperCamelCase : Any = alt_pipe(**lowerCAmelCase__ )
_UpperCamelCase : Any = output.images
_UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCamelCase : List[Any] = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Optional[Any] = self.get_dummy_components()
_UpperCamelCase : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
_UpperCamelCase : int = RobertaSeriesConfig(
hidden_size=3_2, project_dim=3_2, intermediate_size=3_7, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, vocab_size=5_0_0_2, )
# TODO: remove after fixing the non-deterministic text encoder
_UpperCamelCase : Tuple = RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
_UpperCamelCase : int = text_encoder
_UpperCamelCase : str = AltDiffusionPipeline(**lowerCAmelCase__ )
_UpperCamelCase : List[str] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = alt_pipe(**lowerCAmelCase__ )
_UpperCamelCase : List[str] = output.images
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCamelCase : List[Any] = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def snake_case ( self : List[str] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase : str = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''', safety_checker=lowerCAmelCase__ )
_UpperCamelCase : List[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : int = '''A painting of a squirrel eating a burger'''
_UpperCamelCase : int = torch.manual_seed(0 )
_UpperCamelCase : Dict = alt_pipe([prompt], generator=lowerCAmelCase__, guidance_scale=6.0, num_inference_steps=2_0, output_type='''np''' )
_UpperCamelCase : Optional[int] = output.images
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCamelCase : List[str] = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self : str ) -> str:
'''simple docstring'''
_UpperCamelCase : Any = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''', subfolder='''scheduler''' )
_UpperCamelCase : Dict = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__ )
_UpperCamelCase : Dict = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = '''A painting of a squirrel eating a burger'''
_UpperCamelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = alt_pipe([prompt], generator=lowerCAmelCase__, num_inference_steps=2, output_type='''numpy''' )
_UpperCamelCase : Tuple = output.images
_UpperCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCamelCase : str = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 128
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 153
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowerCamelCase ( _lowercase , unittest.TestCase ):
UpperCAmelCase_ = VideoToVideoSDPipeline
UpperCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
UpperCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
UpperCAmelCase_ = PipelineTesterMixin.required_optional_params - {"latents"}
UpperCAmelCase_ = False
# No `output_type`.
UpperCAmelCase_ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def snake_case_ (self ) -> List[Any]:
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCamelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
UpperCamelCase = CLIPTextModel(__a )
UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def snake_case_ (self , __a , __a=0 ) -> Dict:
# 3 frames
UpperCamelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith("mps" ):
UpperCamelCase = torch.manual_seed(__a )
else:
UpperCamelCase = torch.Generator(device=__a ).manual_seed(__a )
UpperCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = VideoToVideoSDPipeline(**__a )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = self.get_dummy_inputs(__a )
UpperCamelCase = "np"
UpperCamelCase = sd_pipe(**__a ).frames
UpperCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCamelCase = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case_ (self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a , expected_max_diff=5e-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def snake_case_ (self ) -> List[Any]:
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def snake_case_ (self ) -> Optional[Any]:
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def snake_case_ (self ) -> Dict:
pass
def snake_case_ (self ) -> Optional[int]:
return super().test_progress_bar()
@slow
@skip_mps
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> List[str]:
UpperCamelCase = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCamelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase = torch.randn((1, 10, 3, 10_24, 5_76) , generator=__a )
UpperCamelCase = video.to("cuda" )
UpperCamelCase = "Spiderman is surfing"
UpperCamelCase = pipe(__a , video=__a , generator=__a , num_inference_steps=3 , output_type="pt" ).frames
UpperCamelCase = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 153
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( __a ,__a ,__a ,unittest.TestCase ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = StableDiffusionInpaintPipeline
__SCREAMING_SNAKE_CASE :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__SCREAMING_SNAKE_CASE :Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__SCREAMING_SNAKE_CASE :str = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE :Optional[Any] = frozenset([] )
def snake_case__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
__magic_name__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
__magic_name__ = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
__magic_name__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__magic_name__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
__magic_name__ = CLIPTextModel(a__ )
__magic_name__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__magic_name__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self : Any , a__ : Optional[int] , a__ : List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__magic_name__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
__magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) )
__magic_name__ = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(a__ ).startswith('''mps''' ):
__magic_name__ = torch.manual_seed(a__ )
else:
__magic_name__ = torch.Generator(device=a__ ).manual_seed(a__ )
__magic_name__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.get_dummy_components()
__magic_name__ = StableDiffusionInpaintPipeline(**a__ )
__magic_name__ = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
__magic_name__ = self.get_dummy_inputs(a__ )
__magic_name__ = sd_pipe(**a__ ).images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case__ ( self : List[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def snake_case__ ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Tuple ):
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__magic_name__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
__magic_name__ = '''stabilityai/stable-diffusion-2-inpainting'''
__magic_name__ = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__magic_name__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
__magic_name__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def snake_case__ ( self : List[str] ):
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__magic_name__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
__magic_name__ = '''stabilityai/stable-diffusion-2-inpainting'''
__magic_name__ = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__magic_name__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
__magic_name__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def snake_case__ ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__magic_name__ = '''stabilityai/stable-diffusion-2-inpainting'''
__magic_name__ = PNDMScheduler.from_pretrained(a__ , subfolder='''scheduler''' )
__magic_name__ = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__magic_name__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , )
__magic_name__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 356
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def UpperCamelCase ( a , a ) -> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCamelCase ( a ) -> list[str]:
'''simple docstring'''
__magic_name__ = []
__magic_name__ = 11
__magic_name__ = int('''1''' + '''0''' * digit_len )
for num in range(a , a ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a , a ):
solutions.append(F'''{num}/{den}''' )
den += 1
num += 1
__magic_name__ = 10
return solutions
def UpperCamelCase ( a = 2 ) -> int:
'''simple docstring'''
__magic_name__ = 1.0
for fraction in fraction_list(a ):
__magic_name__ = Fraction(a )
result *= frac.denominator / frac.numerator
return int(a )
if __name__ == "__main__":
print(solution())
| 98
| 0
|
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = [randint(-1_000 ,1_000 ) for i in range(10 )]
SCREAMING_SNAKE_CASE__ : List[str] = randint(-5_000 ,5_000 )
return (arr, r)
UpperCAmelCase__ : Dict = make_dataset()
def lowercase_ ( _snake_case ,_snake_case ):
for triplet in permutations(lowerCAmelCase__ ,3 ):
if sum(lowerCAmelCase__ ) == target:
return tuple(sorted(lowerCAmelCase__ ) )
return (0, 0, 0)
def lowercase_ ( _snake_case ,_snake_case ):
arr.sort()
SCREAMING_SNAKE_CASE__ : List[Any] = len(lowerCAmelCase__ )
for i in range(n - 1 ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : List[Any] = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
SCREAMING_SNAKE_CASE__ : Any = """
triplet_sum1(*dataset)
"""
SCREAMING_SNAKE_CASE__ : str = """
triplet_sum2(*dataset)
"""
SCREAMING_SNAKE_CASE__ : List[Any] = repeat(setup=lowerCAmelCase__ ,stmt=lowerCAmelCase__ ,repeat=5 ,number=10_000 )
SCREAMING_SNAKE_CASE__ : Tuple = repeat(setup=lowerCAmelCase__ ,stmt=lowerCAmelCase__ ,repeat=5 ,number=10_000 )
return (min(lowerCAmelCase__ ), min(lowerCAmelCase__ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase__ : str = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 25
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
lowercase = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
lowercase = model.state_dict()
def to_tf_var_name(lowerCAmelCase__ ):
for patt, repl in iter(lowerCAmelCase__ ):
lowercase = name.replace(lowerCAmelCase__ , lowerCAmelCase__ )
return f'bert/{name}'
def create_tf_var(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = tf.dtypes.as_dtype(tensor.dtype )
lowercase = tf.get_variable(dtype=lowerCAmelCase__ , shape=tensor.shape , name=lowerCAmelCase__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowerCAmelCase__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
lowercase = to_tf_var_name(lowerCAmelCase__ )
lowercase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
lowercase = torch_tensor.T
lowercase = create_tf_var(tensor=lowerCAmelCase__ , name=lowerCAmelCase__ , session=lowerCAmelCase__ )
tf.keras.backend.set_value(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = session.run(lowerCAmelCase__ )
print(f'Successfully created {tf_name}: {np.allclose(lowerCAmelCase__ , lowerCAmelCase__ )}' )
lowercase = tf.train.Saver(tf.trainable_variables() )
saver.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def UpperCamelCase ( lowerCAmelCase__=None ):
'''simple docstring'''
lowercase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Directory in which to save tensorflow model''' )
lowercase = parser.parse_args(lowerCAmelCase__ )
lowercase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowerCAmelCase__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 101
| 0
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any =logging.get_logger(__name__)
lowerCamelCase : str ={
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __a ( A__ ):
_lowerCAmelCase : int = '''data2vec-audio'''
def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : int=7_68 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Tuple=12 , SCREAMING_SNAKE_CASE : Optional[Any]=30_72 , SCREAMING_SNAKE_CASE : str="gelu" , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : Dict=0.0 , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : List[str]=0.0_2 , SCREAMING_SNAKE_CASE : Optional[Any]=1e-5 , SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , SCREAMING_SNAKE_CASE : Any=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE : List[Any]=(10, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE : List[str]=False , SCREAMING_SNAKE_CASE : List[str]=16 , SCREAMING_SNAKE_CASE : Any=19 , SCREAMING_SNAKE_CASE : Optional[Any]=5 , SCREAMING_SNAKE_CASE : List[str]=0.0_5 , SCREAMING_SNAKE_CASE : Tuple=10 , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : int=0.0 , SCREAMING_SNAKE_CASE : str=10 , SCREAMING_SNAKE_CASE : Union[str, Any]=0 , SCREAMING_SNAKE_CASE : Union[str, Any]="sum" , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : Dict=2_56 , SCREAMING_SNAKE_CASE : List[Any]=(5_12, 5_12, 5_12, 5_12, 15_00) , SCREAMING_SNAKE_CASE : Any=(5, 3, 3, 1, 1) , SCREAMING_SNAKE_CASE : List[str]=(1, 2, 3, 1, 1) , SCREAMING_SNAKE_CASE : Dict=5_12 , SCREAMING_SNAKE_CASE : Optional[int]=0 , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Dict=2 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Any=3 , SCREAMING_SNAKE_CASE : List[Any]=None , **SCREAMING_SNAKE_CASE : str , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = hidden_size
UpperCamelCase__ : int = feat_extract_activation
UpperCamelCase__ : List[Any] = list(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = list(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = list(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = conv_bias
UpperCamelCase__ : Dict = num_conv_pos_embeddings
UpperCamelCase__ : Union[str, Any] = num_conv_pos_embedding_groups
UpperCamelCase__ : Union[str, Any] = conv_pos_kernel_size
UpperCamelCase__ : Union[str, Any] = len(self.conv_dim )
UpperCamelCase__ : Optional[Any] = num_hidden_layers
UpperCamelCase__ : List[Any] = intermediate_size
UpperCamelCase__ : Tuple = hidden_act
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : Any = hidden_dropout
UpperCamelCase__ : int = attention_dropout
UpperCamelCase__ : List[Any] = activation_dropout
UpperCamelCase__ : Any = feat_proj_dropout
UpperCamelCase__ : List[str] = final_dropout
UpperCamelCase__ : List[Any] = layerdrop
UpperCamelCase__ : Optional[Any] = layer_norm_eps
UpperCamelCase__ : List[str] = initializer_range
UpperCamelCase__ : Optional[int] = vocab_size
UpperCamelCase__ : Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ : Union[str, Any] = mask_time_prob
UpperCamelCase__ : str = mask_time_length
UpperCamelCase__ : Dict = mask_time_min_masks
UpperCamelCase__ : Dict = mask_feature_prob
UpperCamelCase__ : List[str] = mask_feature_length
UpperCamelCase__ : int = mask_feature_min_masks
# ctc loss
UpperCamelCase__ : Union[str, Any] = ctc_loss_reduction
UpperCamelCase__ : Any = ctc_zero_infinity
# adapter
UpperCamelCase__ : str = add_adapter
UpperCamelCase__ : Optional[int] = adapter_kernel_size
UpperCamelCase__ : Any = adapter_stride
UpperCamelCase__ : Any = num_adapter_layers
UpperCamelCase__ : Union[str, Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase__ : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase__ : List[Any] = list(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = list(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = list(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = xvector_output_dim
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return math.prod(self.conv_stride )
| 196
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __a ( unittest.TestCase ):
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : Dict = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : List[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ : List[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCamelCase__ : Tuple = {"unk_token": "<unk>"}
UpperCamelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
UpperCamelCase__ : int = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : int = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
UpperCamelCase__ : int = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : List[str] = self.get_rust_tokenizer()
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : List[str] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase__ : List[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : str = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCamelCase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
UpperCamelCase__ : Tuple = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_image_processor()
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = self.prepare_image_inputs()
UpperCamelCase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
UpperCamelCase__ : Optional[Any] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = "lower newer"
UpperCamelCase__ : int = processor(text=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = "lower newer"
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : Tuple = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Optional[int] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : Optional[Any] = processor.batch_decode(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : Dict = self.get_image_processor()
UpperCamelCase__ : Tuple = self.get_tokenizer()
UpperCamelCase__ : Dict = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = "lower newer"
UpperCamelCase__ : List[str] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 196
| 1
|
from __future__ import annotations
def __lowercase ( _A ) -> list[int]: # This function is recursive
SCREAMING_SNAKE_CASE : List[Any] = len(_A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
SCREAMING_SNAKE_CASE : List[str] = array[0]
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Optional[Any] = 1
SCREAMING_SNAKE_CASE : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Dict = [element for element in array[i:] if element >= array[i]]
SCREAMING_SNAKE_CASE : Any = longest_subsequence(_A )
if len(_A ) > len(_A ):
SCREAMING_SNAKE_CASE : str = temp_array
else:
i += 1
SCREAMING_SNAKE_CASE : List[Any] = [element for element in array[1:] if element >= pivot]
SCREAMING_SNAKE_CASE : Tuple = [pivot, *longest_subsequence(_A )]
if len(_A ) > len(_A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : str = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] ="""table-transformer"""
UpperCAmelCase__ : Union[str, Any] =["""past_key_values"""]
UpperCAmelCase__ : Any ={
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Tuple , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Any=1_0_0 , UpperCAmelCase__ : Optional[Any]=6 , UpperCAmelCase__ : Dict=2_0_4_8 , UpperCAmelCase__ : Any=8 , UpperCAmelCase__ : List[str]=6 , UpperCAmelCase__ : Union[str, Any]=2_0_4_8 , UpperCAmelCase__ : int=8 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Dict="relu" , UpperCAmelCase__ : List[Any]=2_5_6 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : List[str]=1.0 , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : int="sine" , UpperCAmelCase__ : Dict="resnet50" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Union[str, Any]=1 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : int=0.1 , **UpperCAmelCase__ : Union[str, Any] , ) ->Dict:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
SCREAMING_SNAKE_CASE : str = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Optional[int] = backbone_config.get("""model_type""" )
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : Optional[Any] = config_class.from_dict(UpperCAmelCase__ )
# set timm attributes to None
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = None, None, None
SCREAMING_SNAKE_CASE : List[Any] = use_timm_backbone
SCREAMING_SNAKE_CASE : List[Any] = backbone_config
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_queries
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Any = encoder_ffn_dim
SCREAMING_SNAKE_CASE : str = encoder_layers
SCREAMING_SNAKE_CASE : List[str] = encoder_attention_heads
SCREAMING_SNAKE_CASE : Tuple = decoder_ffn_dim
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : List[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Optional[int] = activation_dropout
SCREAMING_SNAKE_CASE : Tuple = activation_function
SCREAMING_SNAKE_CASE : int = init_std
SCREAMING_SNAKE_CASE : str = init_xavier_std
SCREAMING_SNAKE_CASE : Tuple = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layers
SCREAMING_SNAKE_CASE : Any = auxiliary_loss
SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : List[Any] = backbone
SCREAMING_SNAKE_CASE : Optional[Any] = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Optional[Any] = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE : List[Any] = class_cost
SCREAMING_SNAKE_CASE : Tuple = bbox_cost
SCREAMING_SNAKE_CASE : Dict = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE : Tuple = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : List[Any] = giou_loss_coefficient
SCREAMING_SNAKE_CASE : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def _lowercase ( self : List[str] ) ->int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowercase ( self : Any ) ->int:
"""simple docstring"""
return self.d_model
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] =version.parse("""1.11""" )
@property
def _lowercase ( self : Union[str, Any] ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _lowercase ( self : Optional[Any] ) ->float:
"""simple docstring"""
return 1e-5
@property
def _lowercase ( self : Tuple ) ->int:
"""simple docstring"""
return 1_2
| 245
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ ={
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ =['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ =['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ =[
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ =[
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
lowercase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 350
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any ):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a : Union[str, Any] = np.full((len(lowerCAmelCase__ ), sequence_length, 2) , lowerCAmelCase__ )
else:
__a : str = np.full((len(lowerCAmelCase__ ), sequence_length) , lowerCAmelCase__ )
for i, tensor in enumerate(lowerCAmelCase__ ):
if padding_side == "right":
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a : Any = tensor[:sequence_length]
else:
__a : List[Any] = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a : Dict = tensor[:sequence_length]
else:
__a : int = tensor[:sequence_length]
return out_tensor.tolist()
def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] ):
__a : str = ord(lowerCAmelCase__ )
if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
return True
__a : List[str] = unicodedata.category(lowerCAmelCase__ )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : PreTrainedTokenizerBase
_SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = True
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : int = -100
_SCREAMING_SNAKE_CASE : str = "pt"
def lowerCAmelCase (self : str , snake_case_ : Tuple ):
import torch
__a : Union[str, Any] = '''label''' if '''label''' in features[0].keys() else '''labels'''
__a : Tuple = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__a : Union[str, Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
__a : List[str] = torch.tensor(batch['''entity_ids'''] ).shape[1]
__a : Tuple = self.tokenizer.padding_side
if padding_side == "right":
__a : Union[str, Any] = [
list(snake_case_ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case_ )) for label in labels
]
else:
__a : Dict = [
[self.label_pad_token_id] * (sequence_length - len(snake_case_ )) + list(snake_case_ ) for label in labels
]
__a : Dict = [feature['''ner_tags'''] for feature in features]
__a : Optional[Any] = padding_tensor(snake_case_ , -1 , snake_case_ , snake_case_ )
__a : Union[str, Any] = [feature['''original_entity_spans'''] for feature in features]
__a : Optional[int] = padding_tensor(snake_case_ , (-1, -1) , snake_case_ , snake_case_ )
__a : List[str] = {k: torch.tensor(snake_case_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 90
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int=13 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[int]=99 , SCREAMING_SNAKE_CASE__ : Dict=32 , SCREAMING_SNAKE_CASE__ : int=5 , SCREAMING_SNAKE_CASE__ : Tuple=4 , SCREAMING_SNAKE_CASE__ : Tuple=37 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_12 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : Any=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : str=None , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
__lowerCAmelCase = self.vocab_size - 1
def a ( self : Tuple ) -> Any:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , *SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
__lowerCAmelCase = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , *SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
__lowerCAmelCase = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Dict ) -> Dict:
__lowerCAmelCase = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , *SCREAMING_SNAKE_CASE__ : Any ) -> int:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self : Dict ) -> int:
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : List[str] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE : Any = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_SCREAMING_SNAKE_CASE : str = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=False ) -> Any:
__lowerCAmelCase = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase = inputs_dict["""labels"""]
__lowerCAmelCase = inputs_dict["""labels"""]
__lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def a ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = OpenAIGPTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=37 )
def a ( self : List[str] ) -> int:
self.config_tester.run_common_tests()
def a ( self : Tuple ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE__ )
def a ( self : str ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] ) -> Any:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE__ )
def a ( self : Dict ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def a ( self : Union[str, Any] ) -> int:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def a ( self : Optional[int] ) -> str:
__lowerCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) # the president is
__lowerCAmelCase = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__lowerCAmelCase = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE__ )
| 229
|
'''simple docstring'''
def UpperCamelCase_ ( snake_case_ : list[int] , snake_case_ : list[int] ) -> tuple[float, float]:
'''simple docstring'''
if not len(snake_case_ ) == len(snake_case_ ) == 3:
raise ValueError("""Please enter a valid equation.""" )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("""Both a & b of two equations can't be zero.""" )
# Extract the coefficients
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = equationa
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = equationa
# Calculate the determinants of the matrices
__lowerCAmelCase = aa * ba - aa * ba
__lowerCAmelCase = ca * ba - ca * ba
__lowerCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("""Infinite solutions. (Consistent system)""" )
else:
raise ValueError("""No solution. (Inconsistent system)""" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
__lowerCAmelCase = determinant_x / determinant
__lowerCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 229
| 1
|
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def lowercase_ ( self : Union[str, Any] , _A : Tuple=0 ):
'''simple docstring'''
UpperCAmelCase__ : Any = floats_tensor((1, 3, 128, 128) , rng=random.Random(snake_case_ ) )
UpperCAmelCase__ : List[str] = np.random.RandomState(snake_case_ )
UpperCAmelCase__ : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.7_5,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCAmelCase__ : Tuple = self.get_dummy_inputs()
UpperCAmelCase__ : Union[str, Any] = pipe(**snake_case_ ).images
UpperCAmelCase__ : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase__ : int = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase__ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCAmelCase__ : Dict = self.get_dummy_inputs()
UpperCAmelCase__ : int = pipe(**snake_case_ ).images
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase__ : Tuple = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase__ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
# warmup pass to apply optimizations
UpperCAmelCase__ : List[Any] = pipe(**self.get_dummy_inputs() )
UpperCAmelCase__ : List[str] = self.get_dummy_inputs()
UpperCAmelCase__ : Optional[int] = pipe(**snake_case_ ).images
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase__ : Any = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase__ : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCAmelCase__ : Union[str, Any] = self.get_dummy_inputs()
UpperCAmelCase__ : List[Any] = pipe(**snake_case_ ).images
UpperCAmelCase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase__ : Optional[Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase__ : List[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCAmelCase__ : Tuple = self.get_dummy_inputs()
UpperCAmelCase__ : Tuple = pipe(**snake_case_ ).images
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase__ : int = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCAmelCase__ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCAmelCase__ : List[str] = self.get_dummy_inputs()
UpperCAmelCase__ : List[str] = pipe(**snake_case_ ).images
UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase__ : List[str] = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : Optional[Any] = False
return options
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCAmelCase__ : str = init_image.resize((768, 512) )
# using the PNDM scheduler by default
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCAmelCase__ : Dict = """A fantasy landscape, trending on artstation"""
UpperCAmelCase__ : str = np.random.RandomState(0 )
UpperCAmelCase__ : Union[str, Any] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type='''np''' , )
UpperCAmelCase__ : str = output.images
UpperCAmelCase__ : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase__ : Optional[Any] = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCAmelCase__ : List[Any] = init_image.resize((768, 512) )
UpperCAmelCase__ : Tuple = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
UpperCAmelCase__ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCAmelCase__ : Union[str, Any] = """A fantasy landscape, trending on artstation"""
UpperCAmelCase__ : Optional[int] = np.random.RandomState(0 )
UpperCAmelCase__ : Optional[int] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case_ , output_type='''np''' , )
UpperCAmelCase__ : Any = output.images
UpperCAmelCase__ : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase__ : Tuple = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 371
|
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
UpperCamelCase__ = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = BlenderbotSmallTokenizer
def __init__( self : List[Any] , _A : List[Any]=None , _A : Optional[Any]=None , _A : Optional[int]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : Any=False , _A : Union[str, Any]=True , **_A : Optional[int] , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=_A , merges=_A , add_prefix_space=_A , trim_offsets=_A , ) , bos_token=_A , eos_token=_A , unk_token=_A , **_A , )
UpperCAmelCase__ : List[Any] = add_prefix_space
def lowercase_ ( self : str , _A : Any , _A : Any=None ):
'''simple docstring'''
UpperCAmelCase__ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 299
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
_UpperCAmelCase = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
_UpperCAmelCase = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class UpperCAmelCase ( __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = SqueezeBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
A_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCamelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCamelCase__ ) != tokenize_chinese_chars
):
A_ : Union[str, Any] = getattr(lowerCamelCase__ , normalizer_state.pop('type' ) )
A_ : str = do_lower_case
A_ : Dict = strip_accents
A_ : str = tokenize_chinese_chars
A_ : Dict = normalizer_class(**lowerCamelCase__ )
A_ : Optional[int] = do_lower_case
def lowerCAmelCase_ ( self , lowercase , lowercase=None ):
"""simple docstring"""
A_ : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
A_ : Optional[Any] = [self.sep_token_id]
A_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
A_ : Optional[Any] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 140
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __magic_name__ ( __lowerCAmelCase):
def __init__( self : Optional[Any] , lowerCamelCase__ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase__ : Optional[NamedSplit] = None , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : int , ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : int = path_or_paths
UpperCamelCase__ : List[Any] = split if split or isinstance(lowerCamelCase__ , lowerCamelCase__ ) else '''train'''
UpperCamelCase__ : Optional[Any] = features
UpperCamelCase__ : List[Any] = cache_dir
UpperCamelCase__ : Optional[int] = keep_in_memory
UpperCamelCase__ : int = streaming
UpperCamelCase__ : Union[str, Any] = num_proc
UpperCamelCase__ : List[Any] = kwargs
@abstractmethod
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
'''simple docstring'''
pass
class __magic_name__ ( __lowerCAmelCase):
def __init__( self : int , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = features
UpperCamelCase__ : Optional[int] = cache_dir
UpperCamelCase__ : Union[str, Any] = keep_in_memory
UpperCamelCase__ : Tuple = streaming
UpperCamelCase__ : Optional[Any] = num_proc
UpperCamelCase__ : Union[str, Any] = kwargs
@abstractmethod
def UpperCAmelCase__ ( self : Tuple ) -> Union[Dataset, IterableDataset]:
'''simple docstring'''
pass
| 146
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : Dict =s.rsplit(__lowerCamelCase , __lowerCamelCase )
return new.join(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : List[str] ):
"""simple docstring"""
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
lowerCamelCase__ : Any ={}
lowerCamelCase__ : Optional[int] =['''group_1''', '''group_2''', '''group_3''', '''group_4''']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase__ : List[str] =key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
lowerCamelCase__ : Dict =key.replace('''res_path.''' , '''res_path.path.''' )
if key.endswith('''.w''' ):
lowerCamelCase__ : Tuple =rreplace(__lowerCamelCase , '''.w''' , '''.weight''' , 1 )
if key.endswith('''.b''' ):
lowerCamelCase__ : Optional[Any] =rreplace(__lowerCamelCase , '''.b''' , '''.bias''' , 1 )
lowerCamelCase__ : Union[str, Any] =value.float()
return upgrade
@torch.no_grad()
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=True ):
"""simple docstring"""
from dall_e import Encoder
lowerCamelCase__ : Any =Encoder()
if os.path.exists(__lowerCamelCase ):
lowerCamelCase__ : str =torch.load(__lowerCamelCase )
else:
lowerCamelCase__ : Optional[int] =torch.hub.load_state_dict_from_url(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : Any =ckpt.state_dict()
encoder.load_state_dict(__lowerCamelCase )
if config_path is not None:
lowerCamelCase__ : Tuple =FlavaImageCodebookConfig.from_pretrained(__lowerCamelCase )
else:
lowerCamelCase__ : List[Any] =FlavaImageCodebookConfig()
lowerCamelCase__ : Union[str, Any] =FlavaImageCodebook(__lowerCamelCase ).eval()
lowerCamelCase__ : Tuple =encoder.state_dict()
lowerCamelCase__ : Dict =upgrade_state_dict(__lowerCamelCase )
hf_model.load_state_dict(__lowerCamelCase )
lowerCamelCase__ : Optional[int] =hf_model.state_dict()
lowerCamelCase__ : Optional[int] =count_parameters(__lowerCamelCase )
lowerCamelCase__ : Dict =count_parameters(__lowerCamelCase )
assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(__lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
_lowercase : Dict = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
_lowercase : Optional[int] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 272
|
"""simple docstring"""
from collections import defaultdict
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : List[str] )-> Optional[int]:
lowerCamelCase__ : List[Any] =total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
lowerCamelCase__ : Optional[Any] =[
[-1 for i in range(total + 1 )] for j in range(2 ** len(lowerCamelCase ) )
]
lowerCamelCase__ : Any =defaultdict(lowerCamelCase ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
lowerCamelCase__ : List[Any] =(1 << len(lowerCamelCase )) - 1
def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : Any )-> Any:
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
lowerCamelCase__ : Optional[int] =self.count_ways_until(lowerCamelCase, task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1 )
# save the value.
lowerCamelCase__ : int =total_ways_util
return self.dp[mask][task_no]
def snake_case ( self : Dict, lowerCamelCase : Dict )-> int:
# Store the list of persons for each task
for i in range(len(lowerCamelCase ) ):
for j in task_performed[i]:
self.task[j].append(lowerCamelCase )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0, 1 )
if __name__ == "__main__":
_lowercase : Tuple = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
_lowercase : Dict = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 272
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
snake_case__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case__ : List[Any] = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = 42
class A_ ( _lowerCamelCase ):
def __init__(self :Any , _UpperCamelCase :PriorTransformer , _UpperCamelCase :CLIPVisionModel , _UpperCamelCase :CLIPImageProcessor , _UpperCamelCase :HeunDiscreteScheduler , _UpperCamelCase :ShapERenderer , )-> Union[str, Any]:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def _lowerCAmelCase (self :Optional[Any] , _UpperCamelCase :str , _UpperCamelCase :Any , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :Optional[int] , _UpperCamelCase :List[Any] , _UpperCamelCase :Union[str, Any] )-> List[str]:
if latents is None:
__A = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
__A = latents.to(_UpperCamelCase )
__A = latents * scheduler.init_noise_sigma
return latents
def _lowerCAmelCase (self :Dict , _UpperCamelCase :int=0 )-> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__A = torch.device(f"""cuda:{gpu_id}""" )
__A = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def _lowerCAmelCase (self :Optional[Any] )-> Tuple:
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _lowerCAmelCase (self :str , _UpperCamelCase :Optional[int] , _UpperCamelCase :Optional[int] , _UpperCamelCase :str , _UpperCamelCase :int , )-> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
__A = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
__A = self.image_processor(_UpperCamelCase , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
__A = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
__A = self.image_encoder(_UpperCamelCase )['''last_hidden_state''']
__A = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
__A = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
__A = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__A = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__(self :Optional[int] , _UpperCamelCase :Union[PIL.Image.Image, List[PIL.Image.Image]] , _UpperCamelCase :int = 1 , _UpperCamelCase :int = 25 , _UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCamelCase :Optional[torch.FloatTensor] = None , _UpperCamelCase :float = 4.0 , _UpperCamelCase :int = 64 , _UpperCamelCase :Optional[str] = "pil" , _UpperCamelCase :bool = True , )-> str:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
__A = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
__A = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
__A = len(_UpperCamelCase )
else:
raise ValueError(
f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}""" )
__A = self._execution_device
__A = batch_size * num_images_per_prompt
__A = guidance_scale > 1.0
__A = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
__A = self.scheduler.timesteps
__A = self.prior.config.num_embeddings
__A = self.prior.config.embedding_dim
__A = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
__A = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__A = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
__A = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
__A , __A = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
__A , __A = noise_pred.chunk(2 )
__A = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
__A = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
__A = []
for i, latent in enumerate(_UpperCamelCase ):
print()
__A = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_UpperCamelCase )
__A = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
__A = images.cpu().numpy()
if output_type == "pil":
__A = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
| 117
|
def _a ( lowerCamelCase: dict ) -> bool:
'''simple docstring'''
__A = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__A = set()
return any(
node not in visited and depth_first_search(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
for node in graph )
def _a ( lowerCamelCase: dict , lowerCamelCase: int , lowerCamelCase: set , lowerCamelCase: set ) -> bool:
'''simple docstring'''
visited.add(lowerCamelCase )
rec_stk.add(lowerCamelCase )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(lowerCamelCase )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 117
| 1
|
from torch import nn
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
super().__init__()
a = class_size
a = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
a = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = self.mlp(lowerCamelCase_ )
return logits
| 71
|
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = LxmertTokenizer
__A = LxmertTokenizerFast
__A = True
__A = True
def UpperCamelCase_ (self ):
"""simple docstring"""
super().setUp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = "UNwant\u00E9d,running"
a = "unwanted, running"
return input_text, output_text
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.tokenizer_class(self.vocab_file )
a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCamelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [7, 4, 5, 10, 8, 9] )
def UpperCamelCase_ (self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = "I was born in 92000, and this is falsé."
a = tokenizer.tokenize(lowerCamelCase_ )
a = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
a = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
a = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
a = self.get_rust_tokenizer()
a = tokenizer.encode(lowerCamelCase_ )
a = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
| 71
| 1
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = """pixel_values"""
lowerCamelCase_ : Optional[Any] = False
lowerCamelCase_ : str = TimmBackboneConfig
def __init__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
requires_backends(self , "timm" )
super().__init__(UpperCamelCase__ )
lowerCamelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(UpperCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
lowerCamelCase : List[Any] = getattr(UpperCamelCase__ , "use_pretrained_backbone" , UpperCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCamelCase : str = config.out_indices if getattr(UpperCamelCase__ , "out_indices" , UpperCamelCase__ ) is not None else (-1,)
lowerCamelCase : Optional[Any] = timm.create_model(
config.backbone , pretrained=UpperCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCamelCase__ , **UpperCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCamelCase : List[Any] = self._backbone.return_layers
lowerCamelCase : Optional[int] = {layer["module"]: str(UpperCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCamelCase__ )
@classmethod
def _lowercase ( cls , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCamelCase : Dict = kwargs.pop("config" , TimmBackboneConfig() )
lowerCamelCase : List[Any] = kwargs.pop("use_timm_backbone" , UpperCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
lowerCamelCase : Any = kwargs.pop("num_channels" , config.num_channels )
lowerCamelCase : Tuple = kwargs.pop("features_only" , config.features_only )
lowerCamelCase : Optional[Any] = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
lowerCamelCase : Optional[int] = kwargs.pop("out_indices" , config.out_indices )
lowerCamelCase : Optional[Any] = TimmBackboneConfig(
backbone=UpperCamelCase__ , num_channels=UpperCamelCase__ , features_only=UpperCamelCase__ , use_pretrained_backbone=UpperCamelCase__ , out_indices=UpperCamelCase__ , )
return super()._from_config(UpperCamelCase__ , **UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Union[str, Any]:
pass
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCamelCase : Any = self._all_layers
lowerCamelCase : Union[str, Any] = self._backbone(UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : int = self._return_layers
lowerCamelCase : List[str] = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCamelCase : Optional[int] = self._backbone(UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Optional[Any] = None
lowerCamelCase : Optional[int] = tuple(UpperCamelCase__ )
lowerCamelCase : List[str] = tuple(UpperCamelCase__ ) if hidden_states is not None else None
if not return_dict:
lowerCamelCase : Dict = (feature_maps,)
if output_hidden_states:
lowerCamelCase : List[Any] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCamelCase__ , hidden_states=UpperCamelCase__ , attentions=UpperCamelCase__ )
| 48
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(lowerCAmelCase__ ) , """Tatoeba directory does not exist.""" )
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> int:
lowerCamelCase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=UpperCamelCase__ )
@slow
def _lowercase ( self ) -> List[Any]:
self.resolver.convert_models(["heb-eng"] )
@slow
def _lowercase ( self ) -> Tuple:
lowerCamelCase , lowerCamelCase : Dict = self.resolver.write_model_card("opus-mt-he-en" , dry_run=UpperCamelCase__ )
assert mmeta["long_pair"] == "heb-eng"
| 48
| 1
|
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =[0] * len(__lowerCamelCase )
lowerCamelCase__ : List[Any] =[]
lowerCamelCase__ : List[Any] =[1] * len(__lowerCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__lowerCamelCase ) ):
if indegree[i] == 0:
queue.append(__lowerCamelCase )
while queue:
lowerCamelCase__ : Tuple =queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowerCamelCase__ : Optional[Any] =long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__lowerCamelCase )
print(max(__lowerCamelCase ) )
# Adjacency list of Graph
_lowercase : Optional[Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 272
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'SpeechT5FeatureExtractor'
_a = 'SpeechT5Tokenizer'
def __init__( self : Dict, lowerCamelCase : Optional[int], lowerCamelCase : str )-> Any:
super().__init__(lowerCamelCase, lowerCamelCase )
def __call__( self : Tuple, *lowerCamelCase : List[str], **lowerCamelCase : Optional[int] )-> List[str]:
lowerCamelCase__ : List[Any] =kwargs.pop('''audio''', lowerCamelCase )
lowerCamelCase__ : List[str] =kwargs.pop('''text''', lowerCamelCase )
lowerCamelCase__ : int =kwargs.pop('''text_target''', lowerCamelCase )
lowerCamelCase__ : Dict =kwargs.pop('''audio_target''', lowerCamelCase )
lowerCamelCase__ : Any =kwargs.pop('''sampling_rate''', lowerCamelCase )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] =self.feature_extractor(lowerCamelCase, *lowerCamelCase, sampling_rate=lowerCamelCase, **lowerCamelCase )
elif text is not None:
lowerCamelCase__ : List[Any] =self.tokenizer(lowerCamelCase, **lowerCamelCase )
else:
lowerCamelCase__ : Any =None
if audio_target is not None:
lowerCamelCase__ : List[str] =self.feature_extractor(audio_target=lowerCamelCase, *lowerCamelCase, sampling_rate=lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : Tuple =targets['''input_values''']
elif text_target is not None:
lowerCamelCase__ : Dict =self.tokenizer(lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : int =targets['''input_ids''']
else:
lowerCamelCase__ : List[str] =None
if inputs is None:
return targets
if targets is not None:
lowerCamelCase__ : Dict =labels
lowerCamelCase__ : Any =targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowerCamelCase__ : Dict =decoder_attention_mask
return inputs
def snake_case ( self : int, *lowerCamelCase : Optional[Any], **lowerCamelCase : Optional[int] )-> Optional[Any]:
lowerCamelCase__ : List[Any] =kwargs.pop('''input_values''', lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =kwargs.pop('''input_ids''', lowerCamelCase )
lowerCamelCase__ : Optional[Any] =kwargs.pop('''labels''', lowerCamelCase )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
lowerCamelCase__ : List[str] =self.feature_extractor.pad(lowerCamelCase, *lowerCamelCase, **lowerCamelCase )
elif input_ids is not None:
lowerCamelCase__ : Tuple =self.tokenizer.pad(lowerCamelCase, **lowerCamelCase )
else:
lowerCamelCase__ : Any =None
if labels is not None:
if "input_ids" in labels or (isinstance(lowerCamelCase, lowerCamelCase ) and "input_ids" in labels[0]):
lowerCamelCase__ : str =self.tokenizer.pad(lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : List[Any] =targets['''input_ids''']
else:
lowerCamelCase__ : Any =self.feature_extractor.feature_size
lowerCamelCase__ : Optional[Any] =self.feature_extractor.num_mel_bins
lowerCamelCase__ : Optional[int] =self.feature_extractor.pad(lowerCamelCase, *lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : List[Any] =feature_size_hack
lowerCamelCase__ : Tuple =targets['''input_values''']
else:
lowerCamelCase__ : Optional[Any] =None
if inputs is None:
return targets
if targets is not None:
lowerCamelCase__ : Tuple =labels
lowerCamelCase__ : Optional[int] =targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowerCamelCase__ : Optional[Any] =decoder_attention_mask
return inputs
def snake_case ( self : List[str], *lowerCamelCase : Union[str, Any], **lowerCamelCase : List[Any] )-> List[Any]:
return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase )
def snake_case ( self : List[str], *lowerCamelCase : List[Any], **lowerCamelCase : Tuple )-> int:
return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase )
| 272
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase_ = {
'''roberta-base''': 5_12,
'''roberta-large''': 5_12,
'''roberta-large-mnli''': 5_12,
'''distilroberta-base''': 5_12,
'''roberta-base-openai-detector''': 5_12,
'''roberta-large-openai-detector''': 5_12,
}
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ['''input_ids''', '''attention_mask''']
snake_case = RobertaTokenizer
def __init__( self : Optional[Any] , __UpperCAmelCase : str=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[Any]="replace" , __UpperCAmelCase : List[str]="<s>" , __UpperCAmelCase : Optional[int]="</s>" , __UpperCAmelCase : Optional[Any]="</s>" , __UpperCAmelCase : List[str]="<s>" , __UpperCAmelCase : List[Any]="<unk>" , __UpperCAmelCase : List[str]="<pad>" , __UpperCAmelCase : Optional[Any]="<mask>" , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : int=True , **__UpperCAmelCase : List[Any] , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase , **__UpperCAmelCase , )
_A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCAmelCase ) != add_prefix_space:
_A = getattr(__UpperCAmelCase , pre_tok_state.pop("type" ) )
_A = add_prefix_space
_A = pre_tok_class(**__UpperCAmelCase )
_A = add_prefix_space
_A = "post_processor"
_A = getattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
if tokenizer_component_instance:
_A = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_A = tuple(state["sep"] )
if "cls" in state:
_A = tuple(state["cls"] )
_A = False
if state.get("add_prefix_space" , __UpperCAmelCase ) != add_prefix_space:
_A = add_prefix_space
_A = True
if state.get("trim_offsets" , __UpperCAmelCase ) != trim_offsets:
_A = trim_offsets
_A = True
if changes_to_apply:
_A = getattr(__UpperCAmelCase , state.pop("type" ) )
_A = component_class(**__UpperCAmelCase )
setattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Tuple ):
'''simple docstring'''
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else value
_A = value
def lowerCAmelCase ( self : Any , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
_A = kwargs.get("is_split_into_words" , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : List[str] ):
'''simple docstring'''
_A = kwargs.get("is_split_into_words" , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ):
'''simple docstring'''
_A = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str]=None ):
'''simple docstring'''
_A = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 79
|
def lowerCAmelCase_ ( A_ ,A_):
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(A_ ,int(b / 2)) * actual_power(A_ ,int(b / 2))
else:
return a * actual_power(A_ ,int(b / 2)) * actual_power(A_ ,int(b / 2))
def lowerCAmelCase_ ( A_ ,A_):
if b < 0:
return 1 / actual_power(A_ ,A_)
return actual_power(A_ ,A_)
if __name__ == "__main__":
print(power(-2, -3))
| 149
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : str = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 92
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : List[str] = IFPipeline
lowerCAmelCase_ : int = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
lowerCAmelCase_ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase_ : List[Any] = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
return self._get_dummy_components()
def lowerCAmelCase__ ( self , a__ , a__=0 ) -> str:
'''simple docstring'''
if str(a__ ).startswith("mps" ):
snake_case_ = torch.manual_seed(a__ )
else:
snake_case_ = torch.Generator(device=a__ ).manual_seed(a__ )
snake_case_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
self._test_save_load_local()
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
snake_case_ = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=a__ , tokenizer=a__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
snake_case_ , snake_case_ = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
snake_case_ = None
snake_case_ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(a__ , a__ , a__ , a__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
snake_case_ = IFImgaImgPipeline(**pipe_a.components )
snake_case_ = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(a__ , a__ , a__ , a__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
snake_case_ = IFInpaintingPipeline(**pipe_a.components )
snake_case_ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(a__ , a__ , a__ , a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> Dict:
'''simple docstring'''
_start_torch_memory_measurement()
snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , num_inference_steps=2 , generator=a__ , output_type="np" , )
snake_case_ = output.images[0]
assert image.shape == (64, 64, 3)
snake_case_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(a__ , a__ )
# pipeline 2
_start_torch_memory_measurement()
snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
snake_case_ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
snake_case_ = output.images[0]
assert image.shape == (256, 256, 3)
snake_case_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(a__ , a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> Dict:
'''simple docstring'''
_start_torch_memory_measurement()
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , num_inference_steps=2 , generator=a__ , output_type="np" , )
snake_case_ = output.images[0]
assert image.shape == (64, 64, 3)
snake_case_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(a__ , a__ )
# pipeline 2
_start_torch_memory_measurement()
snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a__ )
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
snake_case_ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , original_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
snake_case_ = output.images[0]
assert image.shape == (256, 256, 3)
snake_case_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(a__ , a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> str:
'''simple docstring'''
_start_torch_memory_measurement()
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a__ )
snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , mask_image=a__ , num_inference_steps=2 , generator=a__ , output_type="np" , )
snake_case_ = output.images[0]
assert image.shape == (64, 64, 3)
snake_case_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(a__ , a__ )
# pipeline 2
_start_torch_memory_measurement()
snake_case_ = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
snake_case_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a__ )
snake_case_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(a__ )
snake_case_ = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , mask_image=a__ , original_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
snake_case_ = output.images[0]
assert image.shape == (256, 256, 3)
snake_case_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(a__ , a__ )
def UpperCamelCase_( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 92
| 1
|
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
lowercase__ = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> str:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
lowerCAmelCase_ : Any = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowerCAmelCase_ : Optional[Any] = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=__UpperCamelCase , output_all_encodings=__UpperCamelCase , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , __UpperCamelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCAmelCase_ : Dict = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
lowerCAmelCase_ : Tuple = os.path.join(get_home_dir() , "models" )
lowerCAmelCase_ : Union[str, Any] = _load_vocab(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , cls=__UpperCamelCase )
lowerCAmelCase_ : Any = nlp.model.BERTModel(
__UpperCamelCase , len(__UpperCamelCase ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=__UpperCamelCase , use_token_type_embed=__UpperCamelCase , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=__UpperCamelCase , use_decoder=__UpperCamelCase , )
original_bort.load_parameters(__UpperCamelCase , cast_dtype=__UpperCamelCase , ignore_extra=__UpperCamelCase )
lowerCAmelCase_ : str = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowerCAmelCase_ : int = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(__UpperCamelCase ),
}
lowerCAmelCase_ : Optional[int] = BertConfig.from_dict(__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = BertForMaskedLM(__UpperCamelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(__UpperCamelCase ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(__UpperCamelCase , __UpperCamelCase ):
lowerCAmelCase_ : Any = hf_param.shape
lowerCAmelCase_ : List[str] = to_torch(params[gluon_param] )
lowerCAmelCase_ : Optional[Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
lowerCAmelCase_ : Any = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
lowerCAmelCase_ : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
lowerCAmelCase_ : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
lowerCAmelCase_ : Dict = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowerCAmelCase_ : int = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowerCAmelCase_ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
lowerCAmelCase_ : BertSelfAttention = layer.attention.self
lowerCAmelCase_ : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
lowerCAmelCase_ : Optional[Any] = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
lowerCAmelCase_ : List[str] = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
lowerCAmelCase_ : Any = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
lowerCAmelCase_ : Optional[int] = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
lowerCAmelCase_ : str = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
lowerCAmelCase_ : BertSelfOutput = layer.attention.output
lowerCAmelCase_ : int = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
lowerCAmelCase_ : Optional[Any] = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
lowerCAmelCase_ : List[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
lowerCAmelCase_ : Union[str, Any] = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
lowerCAmelCase_ : BertIntermediate = layer.intermediate
lowerCAmelCase_ : Dict = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
lowerCAmelCase_ : List[Any] = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
lowerCAmelCase_ : BertOutput = layer.output
lowerCAmelCase_ : Tuple = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
lowerCAmelCase_ : Tuple = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
lowerCAmelCase_ : int = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
lowerCAmelCase_ : Tuple = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowerCAmelCase_ : Dict = RobertaTokenizer.from_pretrained("roberta-base" )
lowerCAmelCase_ : Union[str, Any] = tokenizer.encode_plus(__UpperCamelCase )["input_ids"]
# Get gluon output
lowerCAmelCase_ : Optional[int] = mx.nd.array([input_ids] )
lowerCAmelCase_ : List[Any] = original_bort(inputs=__UpperCamelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(__UpperCamelCase )
lowerCAmelCase_ : Tuple = BertModel.from_pretrained(__UpperCamelCase )
hf_bort_model.eval()
lowerCAmelCase_ : Optional[int] = tokenizer.encode_plus(__UpperCamelCase , return_tensors="pt" )
lowerCAmelCase_ : Union[str, Any] = hf_bort_model(**__UpperCamelCase )[0]
lowerCAmelCase_ : Optional[Any] = output_gluon[0].asnumpy()
lowerCAmelCase_ : Any = output_hf[0].detach().numpy()
lowerCAmelCase_ : Dict = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowerCAmelCase_ : Any = np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , __UpperCamelCase )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase__ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 241
|
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> dict[str, float]:
"""simple docstring"""
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(__UpperCamelCase , 2 ) - pow(__UpperCamelCase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(__UpperCamelCase , 2 ) - pow(__UpperCamelCase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(__UpperCamelCase , 2 ) + pow(__UpperCamelCase , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : Union[str, Any] =UnCLIPImageVariationPipeline
lowerCamelCase : Any =IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
lowerCamelCase : List[str] =IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase : List[str] =[
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
lowerCamelCase : str =False
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
return 1_00
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(a )
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCamelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCamelCase = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
__lowerCamelCase = UnCLIPTextProjModel(**a )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCamelCase = {
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
__lowerCamelCase = UNetaDConditionModel(**a )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCamelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
torch.manual_seed(1 )
__lowerCamelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.dummy_decoder
__lowerCamelCase = self.dummy_text_proj
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_super_res_first
__lowerCamelCase = self.dummy_super_res_last
__lowerCamelCase = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=10_00 , )
__lowerCamelCase = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=10_00 , )
__lowerCamelCase = CLIPImageProcessor(crop_size=32 , size=32 )
__lowerCamelCase = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Tuple , a : int=0 , a : str=True ):
"""simple docstring"""
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(a )
else:
__lowerCamelCase = torch.Generator(device=a ).manual_seed(a )
if pil_image:
__lowerCamelCase = input_image * 0.5 + 0.5
__lowerCamelCase = input_image.clamp(0 , 1 )
__lowerCamelCase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__lowerCamelCase = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = '''cpu'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**a )
__lowerCamelCase = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase = self.get_dummy_inputs(a , pil_image=a )
__lowerCamelCase = pipe(**a )
__lowerCamelCase = output.images
__lowerCamelCase = self.get_dummy_inputs(a , pil_image=a )
__lowerCamelCase = pipe(
**a , return_dict=a , )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = '''cpu'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**a )
__lowerCamelCase = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase = self.get_dummy_inputs(a , pil_image=a )
__lowerCamelCase = pipe(**a )
__lowerCamelCase = output.images
__lowerCamelCase = self.get_dummy_inputs(a , pil_image=a )
__lowerCamelCase = pipe(
**a , return_dict=a , )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = '''cpu'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**a )
__lowerCamelCase = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase = self.get_dummy_inputs(a , pil_image=a )
__lowerCamelCase = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
__lowerCamelCase = pipe(**a )
__lowerCamelCase = output.images
__lowerCamelCase = self.get_dummy_inputs(a , pil_image=a )
__lowerCamelCase = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
__lowerCamelCase = pipe(
**a , return_dict=a , )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
__lowerCamelCase = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = torch.device('''cpu''' )
class a__ :
lowerCamelCase : List[str] =1
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**a )
__lowerCamelCase = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase = torch.Generator(device=a ).manual_seed(0 )
__lowerCamelCase = pipe.decoder.dtype
__lowerCamelCase = 1
__lowerCamelCase = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
__lowerCamelCase = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
__lowerCamelCase = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
__lowerCamelCase = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
__lowerCamelCase = self.get_dummy_inputs(a , pil_image=a )
__lowerCamelCase = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
__lowerCamelCase = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
__lowerCamelCase = pipeline_inputs.pop('''image''' )
__lowerCamelCase = pipe.image_encoder(a ).image_embeds
__lowerCamelCase = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
__lowerCamelCase = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = torch_device == '''cpu'''
__lowerCamelCase = True
__lowerCamelCase = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
__lowerCamelCase = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
__lowerCamelCase = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
__lowerCamelCase = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
__lowerCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowerCamelCase = pipeline(
a , generator=a , output_type='''np''' , )
__lowerCamelCase = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert_mean_pixel_difference(a , a , 15 )
| 237
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : List[Any] ="unispeech-sat"
def __init__( self : Dict , a : str=32 , a : Any=7_68 , a : Optional[Any]=12 , a : Optional[int]=12 , a : int=30_72 , a : int="gelu" , a : Dict=0.1 , a : Dict=0.1 , a : List[Any]=0.1 , a : Tuple=0.0 , a : Optional[Any]=0.0 , a : Tuple=0.1 , a : List[Any]=0.1 , a : str=0.02 , a : List[Any]=1e-5 , a : int="group" , a : Union[str, Any]="gelu" , a : Optional[int]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , a : List[Any]=(5, 2, 2, 2, 2, 2, 2) , a : int=(10, 3, 3, 3, 3, 2, 2) , a : Optional[Any]=False , a : Any=1_28 , a : Tuple=16 , a : str=False , a : Optional[Any]=True , a : Dict=0.05 , a : List[Any]=10 , a : Any=2 , a : Optional[Any]=0.0 , a : Optional[Any]=10 , a : Any=0 , a : Any=3_20 , a : str=2 , a : List[str]=0.1 , a : List[str]=1_00 , a : List[str]=2_56 , a : str=2_56 , a : Dict=0.1 , a : Optional[Any]="mean" , a : str=False , a : Tuple=False , a : Optional[Any]=2_56 , a : int=(5_12, 5_12, 5_12, 5_12, 15_00) , a : int=(5, 3, 3, 1, 1) , a : Any=(1, 2, 3, 1, 1) , a : Union[str, Any]=5_12 , a : Optional[int]=0 , a : Optional[int]=1 , a : Optional[int]=2 , a : int=5_04 , **a : Dict , ):
"""simple docstring"""
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_norm
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layerdrop
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
__lowerCamelCase = num_clusters
__lowerCamelCase = do_stable_layer_norm
__lowerCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowerCamelCase = num_codevectors_per_group
__lowerCamelCase = num_codevector_groups
__lowerCamelCase = contrastive_logits_temperature
__lowerCamelCase = feat_quantizer_dropout
__lowerCamelCase = num_negatives
__lowerCamelCase = codevector_dim
__lowerCamelCase = proj_codevector_dim
__lowerCamelCase = diversity_loss_weight
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = list(a )
__lowerCamelCase = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 237
| 1
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_snake_case : Optional[int] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_snake_case : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
_snake_case : str = transformers.models.auto.configuration_auto.CONFIG_MAPPING
_snake_case : str = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : Any, lowerCAmelCase_ : int, lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
__lowerCAmelCase = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""", __a, )
is not None
):
__lowerCAmelCase = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
__lowerCAmelCase = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
__lowerCAmelCase = [
'bos_index',
'eos_index',
'pad_index',
'unk_index',
'mask_index',
'image_size',
'use_cache',
'out_features',
'out_indices',
]
__lowerCAmelCase = ['encoder_no_repeat_ngram_size']
# Special cases to be allowed
__lowerCAmelCase = True
if not attribute_used:
__lowerCAmelCase = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
__lowerCAmelCase = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
__lowerCAmelCase = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
__lowerCAmelCase = True
elif attribute.endswith('_token_id' ):
__lowerCAmelCase = True
# configuration class specific cases
if not case_allowed:
__lowerCAmelCase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [] )
__lowerCAmelCase = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def a_ ( lowerCAmelCase_ : str ):
__lowerCAmelCase = dict(inspect.signature(config_class.__init__ ).parameters )
__lowerCAmelCase = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
__lowerCAmelCase = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
__lowerCAmelCase = {}
if len(config_class.attribute_map ) > 0:
__lowerCAmelCase = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
__lowerCAmelCase = inspect.getsourcefile(__a )
__lowerCAmelCase = os.path.dirname(__a )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
__lowerCAmelCase = [os.path.join(__a, __a ) for fn in os.listdir(__a ) if fn.startswith('modeling_' )]
# Get the source code strings
__lowerCAmelCase = []
for path in modeling_paths:
if os.path.isfile(__a ):
with open(__a ) as fp:
modeling_sources.append(fp.read() )
__lowerCAmelCase = []
for config_param, default_value in zip(__a, __a ):
# `attributes` here is all the variant names for `config_param`
__lowerCAmelCase = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__a, __a, __a, __a ):
unused_attributes.append(attributes[0] )
return sorted(__a )
def a_ ( ):
__lowerCAmelCase = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
__lowerCAmelCase = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ), lambda lowerCAmelCase_ : inspect.isclass(__a )
and issubclass(__a, __a )
and inspect.getmodule(__a ) == inspect.getmodule(_config_class ), )
]
for config_class in config_classes_in_module:
__lowerCAmelCase = check_config_attributes_being_used(__a )
if len(__a ) > 0:
__lowerCAmelCase = unused_attributes
if len(__a ) > 0:
__lowerCAmelCase = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__a )
if __name__ == "__main__":
check_config_attributes()
| 284
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : Optional[Any]=None ,_a : Dict=None ,*_a : int ,**_a : str ):
'''simple docstring'''
super().__init__(*_a ,**_a )
if config is None:
assert isinstance(self.model ,_a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
_a : List[Any] = self.model.config
else:
_a : Optional[int] = config
_a : List[str] = data_args
_a : List[Any] = self.config.tgt_vocab_size if isinstance(self.config ,_a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
' padding..' )
if self.args.label_smoothing == 0:
_a : List[str] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_a : Tuple = label_smoothed_nll_loss
def __lowercase ( self : List[str] ,_a : int ):
'''simple docstring'''
if self.optimizer is None:
_a : Union[str, Any] = ['bias', 'LayerNorm.weight']
_a : Tuple = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
_a : Optional[int] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_a : Any = Adafactor
_a : Dict = {'scale_parameter': False, 'relative_step': False}
else:
_a : Union[str, Any] = AdamW
_a : str = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
_a : Union[str, Any] = self.args.learning_rate
if self.sharded_ddp:
_a : str = OSS(
params=_a ,optim=_a ,**_a ,)
else:
_a : Tuple = optimizer_cls(_a ,**_a )
if self.lr_scheduler is None:
_a : List[Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def __lowercase ( self : List[Any] ,_a : List[Any] ):
'''simple docstring'''
_a : str = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_a : int = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_a : List[str] = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
_a : Optional[int] = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=_a )
return scheduler
def __lowercase ( self : Tuple ):
'''simple docstring'''
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __lowercase ( self : Dict ,_a : Dict ,_a : Any ,_a : Dict ):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_a : List[Any] = model(**_a ,use_cache=_a )[0]
_a : Union[str, Any] = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
_a, _a : Union[str, Any] = model(**_a ,labels=_a ,use_cache=_a )[:2]
else:
# compute label smoothed loss
_a : List[Any] = model(**_a ,use_cache=_a )[0]
_a : Any = torch.nn.functional.log_softmax(_a ,dim=-1 )
_a, _a : List[str] = self.loss_fn(_a ,_a ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def __lowercase ( self : Optional[int] ,_a : Union[str, Any] ,_a : List[Any] ):
'''simple docstring'''
_a : Optional[int] = inputs.pop('labels' )
_a, _a : int = self._compute_loss(_a ,_a ,_a )
return loss
def __lowercase ( self : Optional[Any] ,_a : nn.Module ,_a : Dict[str, Union[torch.Tensor, Any]] ,_a : bool ,_a : Optional[List[str]] = None ,):
'''simple docstring'''
_a : int = self._prepare_inputs(_a )
_a : Any = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_a : int = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**_a ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_a : int = self._pad_tensors_to_max_len(_a ,gen_kwargs['max_length'] )
_a : Union[str, Any] = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
_a, _a : Optional[int] = self._compute_loss(_a ,_a ,_a )
_a : Optional[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_a : Optional[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_a : Dict = self._pad_tensors_to_max_len(_a ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def __lowercase ( self : str ,_a : Tuple ,_a : Tuple ):
'''simple docstring'''
_a : List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F""" padded to `max_length`={max_length}""" )
_a : int = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
_a : Union[str, Any] = tensor
return padded_tensor
| 271
| 0
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Dict=False ):
"""simple docstring"""
__magic_name__ : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'module.blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'module.blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'module.blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'module.blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'module.blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'module.blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'module.blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'module.blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'module.blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'module.blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__magic_name__ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Any=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
__magic_name__ : int = ''
else:
__magic_name__ : int = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__magic_name__ : List[Any] = state_dict.pop(f'module.blocks.{i}.attn.qkv.weight' )
__magic_name__ : Dict = state_dict.pop(f'module.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ : List[str] = in_proj_weight[
: config.hidden_size, :
]
__magic_name__ : str = in_proj_bias[: config.hidden_size]
__magic_name__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__magic_name__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__magic_name__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
__magic_name__ : List[Any] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase ( lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__magic_name__ : List[Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
__magic_name__ : Tuple = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict ):
"""simple docstring"""
__magic_name__ : Dict = dct.pop(lowerCAmelCase )
__magic_name__ : List[Any] = val
def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
__magic_name__ : List[str] = ViTMSNConfig()
__magic_name__ : List[Any] = 1000
__magic_name__ : Optional[int] = 'datasets/huggingface/label-files'
__magic_name__ : Optional[Any] = 'imagenet-1k-id2label.json'
__magic_name__ : Any = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase ) , 'r' ) )
__magic_name__ : Optional[int] = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
__magic_name__ : List[Any] = idalabel
__magic_name__ : int = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
__magic_name__ : str = 384
__magic_name__ : Dict = 1536
__magic_name__ : str = 6
elif "l16" in checkpoint_url:
__magic_name__ : Tuple = 1024
__magic_name__ : List[str] = 4096
__magic_name__ : Union[str, Any] = 24
__magic_name__ : Optional[int] = 16
__magic_name__ : List[str] = 0.1
elif "b4" in checkpoint_url:
__magic_name__ : Optional[int] = 4
elif "l7" in checkpoint_url:
__magic_name__ : Tuple = 7
__magic_name__ : str = 1024
__magic_name__ : int = 4096
__magic_name__ : List[str] = 24
__magic_name__ : Tuple = 16
__magic_name__ : Dict = 0.1
__magic_name__ : Optional[int] = ViTMSNModel(lowerCAmelCase )
__magic_name__ : Dict = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location='cpu' )['target_encoder']
__magic_name__ : Union[str, Any] = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCAmelCase )
__magic_name__ : Any = create_rename_keys(lowerCAmelCase , base_model=lowerCAmelCase )
for src, dest in rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
read_in_q_k_v(lowerCAmelCase , lowerCAmelCase , base_model=lowerCAmelCase )
model.load_state_dict(lowerCAmelCase )
model.eval()
__magic_name__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__magic_name__ : Optional[Any] = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw )
__magic_name__ : Optional[Any] = ViTImageProcessor(
size=config.image_size , image_mean=lowerCAmelCase , image_std=lowerCAmelCase )
__magic_name__ : Any = image_processor(images=lowerCAmelCase , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
__magic_name__ : str = model(**lowerCAmelCase )
__magic_name__ : Tuple = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
__magic_name__ : Dict = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
__magic_name__ : Tuple = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
__magic_name__ : str = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
__magic_name__ : Union[str, Any] = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
__magic_name__ : Optional[Any] = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , lowerCAmelCase , atol=1e-4 )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase :str = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 275
|
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
__magic_name__ : int = f'Input value of [number={number}] must be an integer'
raise TypeError(lowerCAmelCase )
if number < 0:
return False
__magic_name__ : Tuple = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275
| 1
|
"""simple docstring"""
_a : Union[str, Any] = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 44
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =["input_features", "is_longer"]
def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> Dict:
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
SCREAMING_SNAKE_CASE_ = top_db
SCREAMING_SNAKE_CASE_ = truncation
SCREAMING_SNAKE_CASE_ = padding
SCREAMING_SNAKE_CASE_ = fft_window_size
SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE_ = hop_length
SCREAMING_SNAKE_CASE_ = max_length_s
SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = frequency_min
SCREAMING_SNAKE_CASE_ = frequency_max
SCREAMING_SNAKE_CASE_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def _UpperCamelCase ( self ) -> Dict[str, Any]:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray:
SCREAMING_SNAKE_CASE_ = spectrogram(
_A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , )
return log_mel_spectrogram.T
def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE_ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE_ = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
_A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A )
SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE_ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE_ = len(_A ) - max_length
SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE_ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE_ = False
else:
SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_A , _A , _A )
SCREAMING_SNAKE_CASE_ = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
SCREAMING_SNAKE_CASE_ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , _A ) )
SCREAMING_SNAKE_CASE_ = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature:
SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE_ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE_ = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
SCREAMING_SNAKE_CASE_ = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
SCREAMING_SNAKE_CASE_ = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE_ = [np.asarray(_A )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE_ = [
self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for mel, longer in padded_inputs:
input_mel.append(_A )
is_longer.append(_A )
if truncation == "fusion" and sum(_A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_A ) )
SCREAMING_SNAKE_CASE_ = True
if isinstance(input_mel[0] , _A ):
SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE_ = BatchFeature(_A )
if return_tensors is not None:
SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_A )
return input_features
| 299
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowerCAmelCase_ (lowerCAmelCase__: List[Any] ):
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def lowerCAmelCase_ (lowerCAmelCase__: int ):
"""simple docstring"""
UpperCAmelCase_: Tuple = create_tensor(lowerCAmelCase__ )
UpperCAmelCase_: List[Any] = gather(lowerCAmelCase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def lowerCAmelCase_ (lowerCAmelCase__: int ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = [state.process_index]
UpperCAmelCase_: Optional[int] = gather_object(lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == state.num_processes, F'{gathered_obj}, {len(lowerCAmelCase__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def lowerCAmelCase_ (lowerCAmelCase__: int ):
"""simple docstring"""
UpperCAmelCase_: Tuple = create_tensor(lowerCAmelCase__ )
UpperCAmelCase_: Any = broadcast(lowerCAmelCase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def lowerCAmelCase_ (lowerCAmelCase__: Optional[int] ):
"""simple docstring"""
if state.is_main_process:
UpperCAmelCase_: List[Any] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
UpperCAmelCase_: List[Any] = torch.arange(state.num_processes ).to(state.device )
UpperCAmelCase_: Optional[int] = pad_across_processes(lowerCAmelCase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def lowerCAmelCase_ (lowerCAmelCase__: Optional[int] ):
"""simple docstring"""
if state.num_processes != 2:
return
UpperCAmelCase_: Dict = create_tensor(lowerCAmelCase__ )
UpperCAmelCase_: int = reduce(lowerCAmelCase__ , """sum""" )
UpperCAmelCase_: Optional[Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ), F'{reduced_tensor} != {truth_tensor}'
def lowerCAmelCase_ (lowerCAmelCase__: List[str] ):
"""simple docstring"""
if state.num_processes != 2:
return
UpperCAmelCase_: Optional[int] = create_tensor(lowerCAmelCase__ )
UpperCAmelCase_: Tuple = reduce(lowerCAmelCase__ , """mean""" )
UpperCAmelCase_: Optional[int] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ), F'{reduced_tensor} != {truth_tensor}'
def lowerCAmelCase_ (lowerCAmelCase__: Union[str, Any] ):
"""simple docstring"""
main()
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = PartialState()
state.print(F'State: {state}' )
state.print("""testing gather""" )
test_gather(lowerCAmelCase__ )
state.print("""testing gather_object""" )
test_gather_object(lowerCAmelCase__ )
state.print("""testing broadcast""" )
test_broadcast(lowerCAmelCase__ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(lowerCAmelCase__ )
state.print("""testing reduce_sum""" )
test_reduce_sum(lowerCAmelCase__ )
state.print("""testing reduce_mean""" )
test_reduce_mean(lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 82
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
a : Dict = datasets.logging.get_logger(__name__)
a : Any = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
a : int = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
a : List[Any] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def lowerCAmelCase_ (lowerCAmelCase__: List[str] , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Dict=False , lowerCAmelCase__: List[Any]=False , lowerCAmelCase__: Any=True , lowerCAmelCase__: Union[str, Any]=False , lowerCAmelCase__: List[Any]="dummy_doc" ):
"""simple docstring"""
UpperCAmelCase_: str = {doc: key_lines}
UpperCAmelCase_: str = {doc: sys_lines}
UpperCAmelCase_: Optional[Any] = {}
UpperCAmelCase_: Optional[int] = 0
UpperCAmelCase_: Optional[Any] = 0
UpperCAmelCase_: str = 0
UpperCAmelCase_: List[Any] = 0
UpperCAmelCase_: Tuple = 0
UpperCAmelCase_: Union[str, Any] = 0
UpperCAmelCase_ , UpperCAmelCase_: List[str] = reader.get_doc_mentions(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
UpperCAmelCase_: List[str] = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_: Any = reader.get_doc_mentions(lowerCAmelCase__ , sys_doc_lines[doc] , lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
UpperCAmelCase_: Tuple = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ )
if remove_nested:
UpperCAmelCase_ , UpperCAmelCase_: str = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
UpperCAmelCase_: Tuple = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_: Dict = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_: Optional[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
"""files, respectively""" )
return doc_coref_infos
def lowerCAmelCase_ (lowerCAmelCase__: Any , lowerCAmelCase__: Dict , lowerCAmelCase__: int , lowerCAmelCase__: Any , lowerCAmelCase__: Optional[int] , lowerCAmelCase__: Optional[Any] , lowerCAmelCase__: int ):
"""simple docstring"""
UpperCAmelCase_: Tuple = get_coref_infos(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_: Any = {}
UpperCAmelCase_: Tuple = 0
UpperCAmelCase_: Optional[Any] = 0
for name, metric in metrics:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = evaluator.evaluate_documents(lowerCAmelCase__ , lowerCAmelCase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(1_0 ) , F'Recall: {recall * 1_0_0:.2f}' , F' Precision: {precision * 1_0_0:.2f}' , F' F1: {fa * 1_0_0:.2f}' , )
if conll_subparts_num == 3:
UpperCAmelCase_: List[str] = (conll / 3) * 1_0_0
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def lowerCAmelCase_ (lowerCAmelCase__: Optional[Any] ):
"""simple docstring"""
UpperCAmelCase_: Dict = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
UpperCAmelCase_: Any = line.split()[5]
if not parse_col == "-":
UpperCAmelCase_: List[str] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def __snake_case (self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ), codebase_urls=["""https://github.com/ns-moosavi/coval"""], reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
], )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> int:
UpperCAmelCase_: Tuple = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
UpperCAmelCase_: str = util.check_gold_parse_annotation(SCREAMING_SNAKE_CASE_ )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
UpperCAmelCase_: Tuple = evaluate(
key_lines=SCREAMING_SNAKE_CASE_, sys_lines=SCREAMING_SNAKE_CASE_, metrics=SCREAMING_SNAKE_CASE_, NP_only=SCREAMING_SNAKE_CASE_, remove_nested=SCREAMING_SNAKE_CASE_, keep_singletons=SCREAMING_SNAKE_CASE_, min_span=SCREAMING_SNAKE_CASE_, )
return score
| 82
| 1
|
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
lowercase__ = tuple[int, int]
class __snake_case :
def __init__( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: set[int] = vertices
a__: dict[EdgeT, int] = {
(min(lowercase), max(lowercase)): weight for edge, weight in edges.items()
}
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
self.vertices.add(edge[0])
self.vertices.add(edge[1])
a__: Union[str, Any] = weight
def lowerCamelCase_ ( self) -> Graph:
'''simple docstring'''
a__: Graph = Graph({min(self.vertices)} , {})
a__: EdgeT
a__: int
a__: EdgeT
a__: int
while len(subgraph.vertices) < len(self.vertices):
a__: Union[str, Any] = max(self.edges.values()) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
a__: List[str] = edge
a__: Tuple = weight
subgraph.add_edge(lowercase , lowercase)
return subgraph
def __a ( _SCREAMING_SNAKE_CASE = "p107_network.txt" ) ->int:
a__: str = os.path.abspath(os.path.dirname(_SCREAMING_SNAKE_CASE ) )
a__: str = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: dict[EdgeT, int] = {}
a__: list[str]
a__: int
a__: int
with open(_SCREAMING_SNAKE_CASE ) as f:
a__: Any = f.read().strip().split('\n' )
a__: str = [line.split(',' ) for line in data]
for edgea in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
for edgea in range(_SCREAMING_SNAKE_CASE ):
if adjaceny_matrix[edgea][edgea] != "-":
a__: List[str] = int(adjaceny_matrix[edgea][edgea] )
a__: Graph = Graph(set(range(len(_SCREAMING_SNAKE_CASE ) ) ) , _SCREAMING_SNAKE_CASE )
a__: Graph = graph.prims_algorithm()
a__: int = sum(graph.edges.values() )
a__: int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 290
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = KandinskyInpaintPipeline
a__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
a__ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
a__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ = False
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return 1_00
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[int] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
torch.manual_seed(0)
a__: Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
a__: Optional[Any] = MultilingualCLIP(lowercase)
a__: int = text_encoder.eval()
return text_encoder
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a__: str = UNetaDConditionModel(**lowercase)
return model
@property
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Dict = self.dummy_text_encoder
a__: int = self.dummy_tokenizer
a__: str = self.dummy_unet
a__: Any = self.dummy_movq
a__: Tuple = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
a__: Tuple = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase_ ( self , lowercase , lowercase=0) -> Any:
'''simple docstring'''
a__: List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase)).to(lowercase)
a__: int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowercase)
# create init_image
a__: Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase)).to(lowercase)
a__: int = image.cpu().permute(0 , 2 , 3 , 1)[0]
a__: Optional[int] = Image.fromarray(np.uinta(lowercase)).convert('RGB').resize((2_56, 2_56))
# create mask
a__: Tuple = np.ones((64, 64) , dtype=np.floataa)
a__: Optional[Any] = 0
if str(lowercase).startswith('mps'):
a__: str = torch.manual_seed(lowercase)
else:
a__: Dict = torch.Generator(device=lowercase).manual_seed(lowercase)
a__: Optional[int] = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = 'cpu'
a__: List[Any] = self.get_dummy_components()
a__: Optional[Any] = self.pipeline_class(**lowercase)
a__: str = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[int] = pipe(**self.get_dummy_inputs(lowercase))
a__: List[str] = output.images
a__: int = pipe(
**self.get_dummy_inputs(lowercase) , return_dict=lowercase , )[0]
a__: Optional[Any] = image[0, -3:, -3:, -1]
a__: List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}')
assert image.shape == (1, 64, 64, 3)
a__: str = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy')
a__: int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
a__: Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.floataa)
a__: int = 0
a__: Optional[int] = 'a hat'
a__: int = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase)
a__: Any = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa)
a__: Optional[Any] = pipeline.to(lowercase)
pipeline.set_progress_bar_config(disable=lowercase)
a__: Dict = torch.Generator(device='cpu').manual_seed(0)
a__ , a__: Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
a__: List[str] = pipeline(
lowercase , image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
a__: str = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase , lowercase)
| 290
| 1
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def __lowercase ( _UpperCamelCase ) ->np.ndarray:
"""simple docstring"""
return input_array.reshape((input_array.size, 1) )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = np.nan
for i in range(_UpperCamelCase ):
lowercase : str = features[:, labels == i]
lowercase : Dict = data.mean(1 )
# Centralize the data of class i
lowercase : List[Any] = data - column_reshape(_UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_UpperCamelCase, centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowercase : Optional[Any] = np.dot(_UpperCamelCase, centered_data.T )
return covariance_sum / features.shape[1]
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->np.ndarray:
"""simple docstring"""
lowercase : Optional[int] = features.mean(1 )
lowercase : Dict = np.nan
for i in range(_UpperCamelCase ):
lowercase : int = features[:, labels == i]
lowercase : Union[str, Any] = data.shape[1]
lowercase : Union[str, Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ), (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T, )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowercase : Any = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ), (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T, )
return covariance_sum / features.shape[1]
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->np.ndarray:
"""simple docstring"""
if features.any():
lowercase : Dict = features.mean(1 )
# Center the dataset
lowercase : List[str] = features - np.reshape(_UpperCamelCase, (data_mean.size, 1) )
lowercase : str = np.dot(_UpperCamelCase, centered_data.T ) / features.shape[1]
lowercase : int = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
lowercase : Any = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowercase : Optional[Any] = np.dot(filtered_eigenvectors.T, _UpperCamelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR, format='''%(message)s''', force=_UpperCamelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->np.ndarray:
"""simple docstring"""
assert classes > dimensions
# Check if features have been already loaded
if features.any:
lowercase : List[Any] = eigh(
covariance_between_classes(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ), covariance_within_classes(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ), )
lowercase : str = eigenvectors[:, ::-1][:, :dimensions]
lowercase : List[Any] = np.linalg.svd(_UpperCamelCase )
lowercase : Dict = svd_matrix[:, 0:dimensions]
lowercase : Dict = np.dot(filtered_svd_matrix.T, _UpperCamelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR, format='''%(message)s''', force=_UpperCamelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __lowercase ( ) ->None:
"""simple docstring"""
lowercase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowercase : str = np.array([0, 0, 0, 1, 1] )
lowercase : Any = 2
lowercase : int = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
lowercase : Tuple = linear_discriminant_analysis(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
if isinstance(_UpperCamelCase, np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __lowercase ( ) ->None:
"""simple docstring"""
lowercase : Optional[int] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowercase : List[Any] = 2
lowercase : Tuple = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
lowercase : Optional[int] = principal_component_analysis(_UpperCamelCase, _UpperCamelCase )
if not np.allclose(_UpperCamelCase, _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
# flake8: noqa
# Lint as: python3
__a = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 173
| 0
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __A( a ):
snake_case_ = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> Any:
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_snake_case )
return config
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
self.check_over_configs(thresholding=_snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a = len(_snake_case )
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for t in reversed(range(_snake_case ) ):
# 1. predict noise residual
__a = model(_snake_case , _snake_case )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a = pred_prev_sample
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a = scheduler_class(**_snake_case )
__a = len(_snake_case )
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for t in reversed(range(_snake_case ) ):
# 1. predict noise residual
__a = model(_snake_case , _snake_case )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a = pred_prev_sample
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_snake_case )
__a = scheduler.timesteps
for i, timestep in enumerate(_snake_case ):
if i == len(_snake_case ) - 1:
__a = -1
else:
__a = timesteps[i + 1]
__a = scheduler.previous_timestep(_snake_case )
__a = prev_t.item()
self.assertEqual(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a = [100, 87, 50, 51, 0]
with self.assertRaises(_snake_case , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a = [100, 87, 50, 1, 0]
__a = len(_snake_case )
with self.assertRaises(_snake_case , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_snake_case , timesteps=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_snake_case , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=_snake_case )
| 6
|
import random
from .binary_exp_mod import bin_exp_mod
def lowerCamelCase__ ( a , a=10_00 ) -> Optional[int]:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
_A: List[Any] = n - 1
_A: Dict = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
_A: List[str] = 0
while count < prec:
_A: Optional[int] = random.randint(2 , n - 1 )
_A: Union[str, Any] = bin_exp_mod(a , a , a )
if b != 1:
_A: Optional[Any] = True
for _ in range(a ):
if b == n - 1:
_A: int = False
break
_A: Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
UpperCAmelCase__ : Dict = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 121
| 0
|
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( _A , _A , _A ):
def get_masked_lm_array(_A ):
a : Optional[Any] = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
a : List[str] = tf.train.load_variable(UpperCAmelCase__ , UpperCAmelCase__ )
if "kernel" in name:
a : str = array.transpose()
return torch.from_numpy(UpperCAmelCase__ )
def get_encoder_array(_A ):
a : Dict = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
a : Tuple = tf.train.load_variable(UpperCAmelCase__ , UpperCAmelCase__ )
if "kernel" in name:
a : Optional[int] = array.transpose()
return torch.from_numpy(UpperCAmelCase__ )
def get_encoder_layer_array(_A , _A ):
a : Optional[Any] = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
a : Dict = tf.train.load_variable(UpperCAmelCase__ , UpperCAmelCase__ )
if "kernel" in name:
a : str = array.transpose()
return torch.from_numpy(UpperCAmelCase__ )
def get_encoder_attention_layer_array(_A , _A , _A ):
a : str = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
a : Tuple = tf.train.load_variable(UpperCAmelCase__ , UpperCAmelCase__ )
a : Optional[Any] = array.reshape(UpperCAmelCase__ )
if "kernel" in name:
a : Optional[int] = array.transpose()
return torch.from_numpy(UpperCAmelCase__ )
print(f"""Loading model based on config from {config_path}...""" )
a : Optional[Any] = BertConfig.from_json_file(UpperCAmelCase__ )
a : List[Any] = BertForMaskedLM(UpperCAmelCase__ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
a : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
a : BertSelfAttention = layer.attention.self
a : Union[str, Any] = get_encoder_attention_layer_array(
UpperCAmelCase__ , '_query_dense/kernel' , self_attn.query.weight.data.shape )
a : str = get_encoder_attention_layer_array(
UpperCAmelCase__ , '_query_dense/bias' , self_attn.query.bias.data.shape )
a : List[Any] = get_encoder_attention_layer_array(
UpperCAmelCase__ , '_key_dense/kernel' , self_attn.key.weight.data.shape )
a : str = get_encoder_attention_layer_array(
UpperCAmelCase__ , '_key_dense/bias' , self_attn.key.bias.data.shape )
a : Tuple = get_encoder_attention_layer_array(
UpperCAmelCase__ , '_value_dense/kernel' , self_attn.value.weight.data.shape )
a : str = get_encoder_attention_layer_array(
UpperCAmelCase__ , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
a : BertSelfOutput = layer.attention.output
a : List[Any] = get_encoder_attention_layer_array(
UpperCAmelCase__ , '_output_dense/kernel' , self_output.dense.weight.data.shape )
a : Union[str, Any] = get_encoder_attention_layer_array(
UpperCAmelCase__ , '_output_dense/bias' , self_output.dense.bias.data.shape )
a : int = get_encoder_layer_array(UpperCAmelCase__ , '_attention_layer_norm/gamma' )
a : Optional[Any] = get_encoder_layer_array(UpperCAmelCase__ , '_attention_layer_norm/beta' )
# Intermediate
a : BertIntermediate = layer.intermediate
a : List[Any] = get_encoder_layer_array(UpperCAmelCase__ , '_intermediate_dense/kernel' )
a : Any = get_encoder_layer_array(UpperCAmelCase__ , '_intermediate_dense/bias' )
# Output
a : BertOutput = layer.output
a : Optional[int] = get_encoder_layer_array(UpperCAmelCase__ , '_output_dense/kernel' )
a : Optional[Any] = get_encoder_layer_array(UpperCAmelCase__ , '_output_dense/bias' )
a : Dict = get_encoder_layer_array(UpperCAmelCase__ , '_output_layer_norm/gamma' )
a : Any = get_encoder_layer_array(UpperCAmelCase__ , '_output_layer_norm/beta' )
# Embeddings
a : Union[str, Any] = get_encoder_array('_position_embedding_layer/embeddings' )
a : Any = get_encoder_array('_type_embedding_layer/embeddings' )
a : str = get_encoder_array('_embedding_norm_layer/gamma' )
a : List[Any] = get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
a : Optional[int] = model.cls.predictions.transform
a : str = get_masked_lm_array('dense/kernel' )
a : List[Any] = get_masked_lm_array('dense/bias' )
a : int = get_masked_lm_array('layer_norm/gamma' )
a : Optional[Any] = get_masked_lm_array('layer_norm/beta' )
a : int = get_masked_lm_array('embedding_table' )
# Pooling
a : int = BertPooler(config=UpperCAmelCase__ )
a : BertPooler = get_encoder_array('_pooler_layer/kernel' )
a : BertPooler = get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(UpperCAmelCase__ )
# Integration test - should load without any errors ;)
a : Dict = BertForMaskedLM.from_pretrained(UpperCAmelCase__ )
print(new_model.eval() )
print('Model conversion was done sucessfully!' )
if __name__ == "__main__":
lowerCAmelCase: Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
lowerCAmelCase: Tuple = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 371
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowerCAmelCase: List[str] = 'examples/'
lowerCAmelCase: List[Any] = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
lowerCAmelCase: str = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
lowerCAmelCase: str = 'README.md'
def lowerCamelCase__ ( _A , _A , _A ):
with open(_A , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Tuple = f.read()
a , a : Tuple = REPLACE_PATTERNS[pattern]
a : Dict = replace.replace('VERSION' , _A )
a : Dict = re_pattern.sub(_A , _A )
with open(_A , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_A )
def lowerCamelCase__ ( _A ):
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_A , _A ) , _A , pattern='examples' )
def lowerCamelCase__ ( _A , _A=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A , _A , _A )
if not patch:
update_version_in_examples(_A )
def lowerCamelCase__ ( ):
a : Tuple = '🤗 Transformers currently provides the following architectures'
a : Any = '1. Want to contribute a new model?'
with open(_A , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Tuple = f.readlines()
# Find the start of the list.
a : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
a : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
a : List[Any] = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(_A , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_A )
def lowerCamelCase__ ( ):
with open(REPLACE_FILES['init'] , 'r' ) as f:
a : Union[str, Any] = f.read()
a : Tuple = REPLACE_PATTERNS['init'][0].search(_A ).groups()[0]
return packaging.version.parse(_A )
def lowerCamelCase__ ( _A=False ):
a : int = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
a : Any = default_version.base_version
elif patch:
a : Dict = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
a : Union[str, Any] = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
a : List[Any] = input(f"""Which version are you releasing? [{default_version}]""" )
if len(_A ) == 0:
a : Union[str, Any] = default_version
print(f"""Updating version to {version}.""" )
global_version_update(_A , patch=_A )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ):
a : int = get_version()
a : Any = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
a : int = current_version.base_version
# Check with the user we got that right.
a : Tuple = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(_A ) == 0:
a : Optional[int] = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(_A )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase: Tuple = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
lowerCAmelCase: Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 96
| 0
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=9_9 , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = parent
lowerCAmelCase__ :List[str] = batch_size
lowerCAmelCase__ :List[Any] = seq_length
lowerCAmelCase__ :str = is_training
lowerCAmelCase__ :Dict = use_input_mask
lowerCAmelCase__ :Union[str, Any] = vocab_size
lowerCAmelCase__ :Union[str, Any] = hidden_size
lowerCAmelCase__ :Optional[Any] = num_hidden_layers
lowerCAmelCase__ :List[str] = num_attention_heads
lowerCAmelCase__ :Optional[Any] = intermediate_size
lowerCAmelCase__ :Union[str, Any] = hidden_act
lowerCAmelCase__ :int = hidden_dropout_prob
lowerCAmelCase__ :int = attention_probs_dropout_prob
lowerCAmelCase__ :Union[str, Any] = max_position_embeddings
lowerCAmelCase__ :Union[str, Any] = initializer_range
lowerCAmelCase__ :List[Any] = use_labels
lowerCAmelCase__ :List[Any] = scope
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ :Tuple = None
if self.use_input_mask:
lowerCAmelCase__ :Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowerCAmelCase__ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ :Optional[int] = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case ( self ):
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def snake_case ( self ):
'''simple docstring'''
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) :Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ :Dict = True
lowerCAmelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = BertGenerationEncoder(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :str = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
lowerCAmelCase__ :int = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :str = True
lowerCAmelCase__ :List[str] = BertGenerationEncoder(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :List[str] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
lowerCAmelCase__ :str = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = True
lowerCAmelCase__ :Union[str, Any] = True
lowerCAmelCase__ :Optional[int] = BertGenerationDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
# first forward pass
lowerCAmelCase__ :int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
lowerCAmelCase__ :Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase__ :Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ :Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase__ :Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ :List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ :Optional[int] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['hidden_states'][0]
lowerCAmelCase__ :int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ :Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ :Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ :Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = BertGenerationDecoder(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = self.prepare_config_and_inputs()
lowerCAmelCase__ :Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( a , a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__magic_name__ :Optional[Any] = (BertGenerationDecoder,) if is_torch_available() else ()
__magic_name__ :Optional[Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = BertGenerationEncoderTester(self )
lowerCAmelCase__ :int = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 )
def snake_case ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ :List[str] = 'bert'
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) :int = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCAmelCase__ :int = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowerCAmelCase__ :Tuple = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
lowerCAmelCase__ :Dict = model(__UpperCAmelCase )[0]
lowerCAmelCase__ :Any = torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase__ :Tuple = torch.tensor(
[[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowerCAmelCase__ :str = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
lowerCAmelCase__ :Any = model(__UpperCAmelCase )[0]
lowerCAmelCase__ :Tuple = torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase__ :Tuple = torch.tensor(
[[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 293
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""OwlViTFeatureExtractor"""]
__A = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 293
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCAmelCase : List[str] =None
lowerCAmelCase : str =logging.get_logger(__name__)
lowerCAmelCase : Any ={'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase : Tuple ={
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase : Optional[int] ={
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
lowerCAmelCase : str ='''▁'''
class a_ ( _lowerCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = AlbertTokenizer
def __init__( self : Optional[Any] , lowercase : Union[str, Any]=None , lowercase : Optional[int]=None , lowercase : List[Any]=True , lowercase : Dict=True , lowercase : List[Any]=False , lowercase : Any="[CLS]" , lowercase : Any="[SEP]" , lowercase : Union[str, Any]="<unk>" , lowercase : str="[SEP]" , lowercase : int="<pad>" , lowercase : str="[CLS]" , lowercase : List[str]="[MASK]" , **lowercase : str , ):
"""simple docstring"""
lowercase_ :int = (
AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase , normalized=lowercase )
if isinstance(lowercase , lowercase )
else mask_token
)
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , **lowercase , )
lowercase_ :List[Any] = do_lower_case
lowercase_ :Optional[int] = remove_space
lowercase_ :Optional[Any] = keep_accents
lowercase_ :List[str] = vocab_file
lowercase_ :Union[str, Any] = False if not self.vocab_file else True
def lowercase__ ( self : Optional[Any] , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
"""simple docstring"""
lowercase_ :Any = [self.sep_token_id]
lowercase_ :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase__ ( self : Tuple , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
"""simple docstring"""
lowercase_ :Dict = [self.sep_token_id]
lowercase_ :str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Any , lowercase : str , lowercase : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase_ :Dict = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
| 147
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
lowerCAmelCase : Tuple =logging.get_logger(__name__)
def UpperCAmelCase_ ( __lowerCamelCase : Union[tf.Tensor, np.ndarray] ):
if isinstance(__lowerCamelCase ,np.ndarray ):
return list(tensor.shape )
lowercase_ :Optional[int] = tf.shape(__lowerCamelCase )
if tensor.shape == tf.TensorShape(__lowerCamelCase ):
return dynamic
lowercase_ :Union[str, Any] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__lowerCamelCase )]
def UpperCAmelCase_ ( __lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[int] = None ,__lowerCamelCase : Optional[str] = None ):
return tf.nn.softmax(logits=logits + 1e-9 ,axis=__lowerCamelCase ,name=__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : List[str]=1e-5 ,__lowerCamelCase : List[str]=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__lowerCamelCase ,__lowerCamelCase ):
raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
# Get mean and variance on the axis to be normalized
lowercase_ , lowercase_ :List[str] = tf.nn.moments(__lowerCamelCase ,axes=[axis] ,keepdims=__lowerCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase_ :Union[str, Any] = [1] * inputs.shape.rank
lowercase_ :Optional[Any] = shape_list(__lowerCamelCase )[axis]
lowercase_ :List[str] = tf.reshape(__lowerCamelCase ,__lowerCamelCase )
lowercase_ :Dict = tf.reshape(__lowerCamelCase ,__lowerCamelCase )
# Compute layer normalization using the batch_normalization
# function.
lowercase_ :Union[str, Any] = tf.nn.batch_normalization(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,offset=__lowerCamelCase ,scale=__lowerCamelCase ,variance_epsilon=__lowerCamelCase ,)
return outputs
def UpperCAmelCase_ ( __lowerCamelCase : Optional[int] ,__lowerCamelCase : Union[str, Any]=0 ,__lowerCamelCase : Dict=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase_ :Optional[int] = tf.shape(__lowerCamelCase )
lowercase_ :Optional[int] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase_ :List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] ,axis=0 )
return tf.reshape(__lowerCamelCase ,__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : tf.Tensor ):
if not isinstance(__lowerCamelCase ,tf.Tensor ):
lowercase_ :str = tf.convert_to_tensor(__lowerCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase_ :List[Any] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase_ :Optional[int] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase_ :str = (
tf.cast(1 ,encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def UpperCAmelCase_ ( __lowerCamelCase : tf.Tensor ,__lowerCamelCase : int ,__lowerCamelCase : str = "input_ids" ):
tf.debugging.assert_less(
__lowerCamelCase ,tf.cast(__lowerCamelCase ,dtype=tensor.dtype ) ,message=(
F'The maximum value of {tensor_name} ({tf.math.reduce_max(__lowerCamelCase )}) must be smaller than the embedding '
F'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
) ,)
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Dict ):
lowercase_ :int = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase_ :Union[str, Any] = [x for x in data if len(__lowerCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
F'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
F'bytes: {bad_attributes}' )
lowercase_ :Union[str, Any] = np.asarray(__lowerCamelCase )
lowercase_ :Optional[int] = 1
lowercase_ :int = np.array_split(__lowerCamelCase ,__lowerCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase_ :List[Any] = np.array_split(__lowerCamelCase ,__lowerCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__lowerCamelCase ):
lowercase_ :int = chunk_data
else:
lowercase_ :Tuple = data
def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : Tuple ):
if name in group.attrs:
lowercase_ :Optional[Any] = [n.decode("utf8" ) if hasattr(__lowerCamelCase ,"decode" ) else n for n in group.attrs[name]]
else:
lowercase_ :List[str] = []
lowercase_ :str = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("utf8" ) if hasattr(__lowerCamelCase ,"decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
chunk_id += 1
return data
def UpperCAmelCase_ ( __lowerCamelCase : str ):
def _expand_single_ad_tensor(__lowerCamelCase : Tuple ):
if isinstance(__lowerCamelCase ,tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(__lowerCamelCase ,axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor ,__lowerCamelCase )
| 147
| 1
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: str ) -> Optional[int]:
'''simple docstring'''
A__ = RobertaPreLayerNormConfig.from_pretrained(
snake_case__ , architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A__ = torch.load(hf_hub_download(repo_id=snake_case__ , filename="pytorch_model.bin" ) )
A__ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
A__ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A__ = tensor_value
A__ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ )
model.save_pretrained(snake_case__ )
# convert tokenizer
A__ = AutoTokenizer.from_pretrained(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase__ = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 68
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''deit'''
def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = encoder_stride
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: int = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float:
return 1e-4
| 74
| 0
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=3 , lowerCAmelCase_=32 , lowerCAmelCase_=3 , lowerCAmelCase_=10 , lowerCAmelCase_=[8, 16, 32, 64] , lowerCAmelCase_=[1, 1, 2, 1] , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=3 , lowerCAmelCase_=None , lowerCAmelCase_=["stage2", "stage3", "stage4"] , lowerCAmelCase_=[2, 3, 4] , lowerCAmelCase_=1 , ) -> List[Any]:
_A = parent
_A = batch_size
_A = image_size
_A = num_channels
_A = embeddings_size
_A = hidden_sizes
_A = depths
_A = is_training
_A = use_labels
_A = hidden_act
_A = num_labels
_A = scope
_A = len(_a )
_A = out_features
_A = out_indices
_A = num_groups
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Tuple:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_A = BitModel(config=_a )
model.to(_a )
model.eval()
_A = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = self.num_labels
_A = BitForImageClassification(_a )
model.to(_a )
model.eval()
_A = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = BitBackbone(config=_a )
model.to(_a )
model.eval()
_A = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A = None
_A = BitBackbone(config=_a )
model.to(_a )
model.eval()
_A = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase ( self ) -> Dict:
_A = self.prepare_config_and_inputs()
_A = config_and_inputs
_A = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :List[str] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCamelCase :Optional[Any] = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase :Any = False
lowerCamelCase :Any = False
lowerCamelCase :Any = False
lowerCamelCase :List[Any] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> str:
_A = BitModelTester(self )
_A = ConfigTester(self , config_class=_a , has_text_modality=_a )
def UpperCAmelCase ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self ) -> str:
return
@unittest.skip(reason="""Bit does not output attentions""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def UpperCAmelCase ( self ) -> Tuple:
pass
def UpperCAmelCase ( self ) -> Tuple:
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_a )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def UpperCAmelCase ( self ) -> List[Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_A = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(_a , _a ) )
_A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A = self.model_tester.prepare_config_and_inputs_for_common()
_A = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_A = layer_type
_A = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
def UpperCAmelCase ( self ) -> str:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def UpperCAmelCase ( self ) -> List[str]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def snake_case ( ) -> Dict:
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> str:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def UpperCAmelCase ( self ) -> str:
_A = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A = model(**_a )
# verify the logits
_A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _a )
_A = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class a ( __lowercase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :str = (BitBackbone,) if is_torch_available() else ()
lowerCamelCase :Union[str, Any] = BitConfig
lowerCamelCase :List[Any] = False
def UpperCAmelCase ( self ) -> Any:
_A = BitModelTester(self )
| 365
|
import colorsys
from PIL import Image # type: ignore
def snake_case ( snake_case__ :float , snake_case__ :float , snake_case__ :int) -> float:
_A = x
_A = y
for step in range(snake_case__): # noqa: B007
_A = a * a - b * b + x
_A = 2 * a * b + y
_A = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(snake_case__ , 1 , 1))
def snake_case ( snake_case__ :int = 800 , snake_case__ :int = 600 , snake_case__ :float = -0.6 , snake_case__ :float = 0 , snake_case__ :float = 3.2 , snake_case__ :int = 50 , snake_case__ :bool = True , ) -> Image.Image:
_A = Image.new("""RGB""" , (image_width, image_height))
_A = img.load()
# loop through the image-coordinates
for image_x in range(snake_case__):
for image_y in range(snake_case__):
# determine the figure-coordinates based on the image-coordinates
_A = figure_width / image_width * image_height
_A = figure_center_x + (image_x / image_width - 0.5) * figure_width
_A = figure_center_y + (image_y / image_height - 0.5) * figure_height
_A = get_distance(snake_case__ , snake_case__ , snake_case__)
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_A = get_color_coded_rgb(snake_case__)
else:
_A = get_black_and_white_rgb(snake_case__)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_SCREAMING_SNAKE_CASE = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 81
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 119
|
from __future__ import annotations
__UpperCAmelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : Union[str, Any] = graph
# mapping node to its parent in resulting breadth first tree
UpperCamelCase : dict[str, str | None] = {}
UpperCamelCase : Union[str, Any] = source_vertex
def snake_case_ ( self ) -> None:
UpperCamelCase : str = {self.source_vertex}
UpperCamelCase : str = None
UpperCamelCase : int = [self.source_vertex] # first in first out queue
while queue:
UpperCamelCase : Dict = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = vertex
queue.append(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
if target_vertex == self.source_vertex:
return self.source_vertex
UpperCamelCase : Optional[Any] = self.parent.get(SCREAMING_SNAKE_CASE_ )
if target_vertex_parent is None:
UpperCamelCase : Union[str, Any] = (
F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
return self.shortest_path(SCREAMING_SNAKE_CASE_ ) + F"""->{target_vertex}"""
if __name__ == "__main__":
__UpperCAmelCase = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 119
| 1
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger()
@dataclass
class a__ :
_a : nn.Module
_a : List[nn.Module] = field(default_factory=snake_case__ )
_a : list = field(default_factory=snake_case__ )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(_A , nn.Convad ) or isinstance(_A , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_A )
def __call__( self , _A ):
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_A )
[x.remove() for x in self.handles]
return self
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return list(filter(lambda _A : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class a__ :
_a : nn.Module
_a : nn.Module
_a : int = 1
_a : List = field(default_factory=snake_case__ )
_a : List = field(default_factory=snake_case__ )
_a : bool = True
def __call__( self , _A ):
"""simple docstring"""
__lowerCAmelCase = Tracker(self.dest )(_A ).parametrized
__lowerCAmelCase = Tracker(self.src )(_A ).parametrized
__lowerCAmelCase = list(filter(lambda _A : type(_A ) not in self.src_skip , _A ) )
__lowerCAmelCase = list(filter(lambda _A : type(_A ) not in self.dest_skip , _A ) )
if len(_A ) != len(_A ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(_A )} operations while"""
f""" destination module has {len(_A )}.""" )
for dest_m, src_m in zip(_A , _A ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
class a__ ( nn.Module ):
def __init__( self , _A ):
"""simple docstring"""
super().__init__()
__lowerCAmelCase = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), f"""Unexpected layer name {k}"""
__lowerCAmelCase = len(_A ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
__lowerCAmelCase = nn.ModuleDict(_A )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
return get_trunk_forward_outputs(
_A , out_feat_keys=_A , feature_blocks=self._feature_blocks , )
class a__ ( snake_case__ ):
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , _A ):
"""simple docstring"""
if x not in self:
__lowerCAmelCase = self.convert_name_to_timm(_A )
__lowerCAmelCase = partial(lambda: (timm.create_model(_A , pretrained=_A ).eval(), None) )
else:
__lowerCAmelCase = super().__getitem__(_A )
return val
class a__ ( snake_case__ ):
def __getitem__( self , _A ):
"""simple docstring"""
if "seer" in x and "in1k" not in x:
__lowerCAmelCase = RegNetModel
else:
__lowerCAmelCase = RegNetForImageClassification
return val
def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Tuple[str, str]] ):
for from_key, to_key in keys:
__lowerCAmelCase = from_state_dict[from_key].clone()
print(F"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Callable[[], nn.Module] , SCREAMING_SNAKE_CASE_ : Callable[[], nn.Module] , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : Path , SCREAMING_SNAKE_CASE_ : bool = True , ):
print(F"""Converting {name}...""" )
with torch.no_grad():
__lowerCAmelCase , __lowerCAmelCase = from_model_func()
__lowerCAmelCase = our_model_func(SCREAMING_SNAKE_CASE_ ).eval()
__lowerCAmelCase = ModuleTransfer(src=SCREAMING_SNAKE_CASE_ , dest=SCREAMING_SNAKE_CASE_ , raise_if_mismatch=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(SCREAMING_SNAKE_CASE_ )
if from_state_dict is not None:
__lowerCAmelCase = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__lowerCAmelCase = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
__lowerCAmelCase = manually_copy_vissl_head(SCREAMING_SNAKE_CASE_ , our_model.state_dict() , SCREAMING_SNAKE_CASE_ )
our_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = our_model(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = (
our_outputs.logits if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else our_outputs.last_hidden_state
)
__lowerCAmelCase = from_model(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = from_output[-1] if type(SCREAMING_SNAKE_CASE_ ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
__lowerCAmelCase = our_outputs.hidden_states[-1]
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
__lowerCAmelCase = 2_24 if "seer" not in name else 3_84
# we can use the convnext one
__lowerCAmelCase = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=SCREAMING_SNAKE_CASE_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
print(F"""Pushed {name}""" )
def _a ( SCREAMING_SNAKE_CASE_ : Path , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : bool = True ):
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = 10_00
__lowerCAmelCase = (1, num_labels)
__lowerCAmelCase = "huggingface/label-files"
__lowerCAmelCase = num_labels
__lowerCAmelCase = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) ) , "r" ) )
__lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = partial(SCREAMING_SNAKE_CASE_ , num_labels=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
__lowerCAmelCase = NameToOurModelFuncMap()
__lowerCAmelCase = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
__lowerCAmelCase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , model_dir=str(SCREAMING_SNAKE_CASE_ ) , map_location="cpu" )
__lowerCAmelCase = model_func()
# check if we have a head, if yes add it
__lowerCAmelCase = files["classy_state_dict"]["base_model"]["model"]
__lowerCAmelCase = model_state_dict["trunk"]
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
return model.eval(), model_state_dict["heads"]
# pretrained
__lowerCAmelCase = partial(
SCREAMING_SNAKE_CASE_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase = partial(
SCREAMING_SNAKE_CASE_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase = partial(
SCREAMING_SNAKE_CASE_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__lowerCAmelCase = partial(
SCREAMING_SNAKE_CASE_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_a=17_44 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
__lowerCAmelCase = partial(
SCREAMING_SNAKE_CASE_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase = partial(
SCREAMING_SNAKE_CASE_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase = partial(
SCREAMING_SNAKE_CASE_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__lowerCAmelCase = partial(
SCREAMING_SNAKE_CASE_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_a=17_44 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
SCREAMING_SNAKE_CASE_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
SCREAMING_SNAKE_CASE_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
return config, expected_shape
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 102
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCamelCase__ = """\
"""
UpperCamelCase__ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
UpperCamelCase__ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = 1_6 , _A = True , _A=None ):
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__lowerCAmelCase = "cuda"
else:
__lowerCAmelCase = "cuda" if torch.cuda.is_available() else "cpu"
__lowerCAmelCase = AutoModelForCausalLM.from_pretrained(_A )
__lowerCAmelCase = model.to(_A )
__lowerCAmelCase = AutoTokenizer.from_pretrained(_A )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_A ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__lowerCAmelCase = model.config.max_length - 1
else:
__lowerCAmelCase = model.config.max_length
__lowerCAmelCase = tokenizer(
_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , return_tensors="pt" , return_attention_mask=_A , ).to(_A )
__lowerCAmelCase = encodings["input_ids"]
__lowerCAmelCase = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__lowerCAmelCase = []
__lowerCAmelCase = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(_A ) , _A ) ):
__lowerCAmelCase = min(start_index + batch_size , len(_A ) )
__lowerCAmelCase = encoded_texts[start_index:end_index]
__lowerCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
__lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_A )
__lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__lowerCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_A ), attn_mask] , dim=1 )
__lowerCAmelCase = encoded_batch
with torch.no_grad():
__lowerCAmelCase = model(_A , attention_mask=_A ).logits
__lowerCAmelCase = out_logits[..., :-1, :].contiguous()
__lowerCAmelCase = labels[..., 1:].contiguous()
__lowerCAmelCase = attn_mask[..., 1:].contiguous()
__lowerCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _A ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_A )}
| 102
| 1
|
import math
def __lowercase ( a__ ) -> Any:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( a__ = 1_00_01 ) -> Tuple:
try:
__SCREAMING_SNAKE_CASE = int(lowerCAmelCase_ )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 2
while len(lowerCAmelCase_ ) < nth:
if is_prime(lowerCAmelCase_ ):
primes.append(lowerCAmelCase_ )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase_ ) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 257
|
def snake_case_ ( lowerCAmelCase_ : list ):
if len(lowerCAmelCase_ ) <= 1:
return [tuple(lowerCAmelCase_ )]
__lowercase : Any = []
def generate(lowerCAmelCase_ : int , lowerCAmelCase_ : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowerCAmelCase_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
__lowercase , __lowercase : List[str] = arr[k - 1], arr[i]
else: # k is odd
__lowercase , __lowercase : Any = arr[k - 1], arr[0]
generate(k - 1 , lowerCAmelCase_ )
generate(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
return res
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 233
| 0
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
A = True
except (ImportError, AttributeError):
A = object
def __A ( *a_ :Union[str, Any] , **a_ :List[Any]) -> Any:
pass
A = False
A = logging.get_logger('''transformers-cli/serving''')
def __A ( a_ :Namespace) -> Optional[Any]:
__a : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(SCREAMING_SNAKE_CASE_ , args.host , args.port , args.workers)
class __lowercase ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __lowercase ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = 42
class __lowercase ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __lowercase ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __lowercase ( snake_case__ ):
'''simple docstring'''
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase ):
__a : str = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=_A , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=_A , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=_A , default=8888 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=_A , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=_A , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=_A , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=_A , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=_A , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=_A )
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : int = pipeline
__a : Optional[int] = host
__a : int = port
__a : str = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install \"transformers[serving]\".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(f"""Serving model over {host}:{port}""" )
__a : Optional[Any] = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=_A , response_class=_A , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=_A , response_class=_A , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=_A , response_class=_A , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=_A , response_class=_A , methods=['''POST'''] , ),
] , timeout=600 , )
def _lowerCamelCase ( self ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def _lowerCamelCase ( self ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def _lowerCamelCase ( self , _UpperCAmelCase = Body(_A , embed=_A ) , _UpperCAmelCase = Body(_A , embed=_A ) ):
try:
__a : Optional[int] = self._pipeline.tokenizer.tokenize(_A )
if return_ids:
__a : str = self._pipeline.tokenizer.convert_tokens_to_ids(_A )
return ServeTokenizeResult(tokens=_A , tokens_ids=_A )
else:
return ServeTokenizeResult(tokens=_A )
except Exception as e:
raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(_A )} )
def _lowerCamelCase ( self , _UpperCAmelCase = Body(_A , embed=_A ) , _UpperCAmelCase = Body(_A , embed=_A ) , _UpperCAmelCase = Body(_A , embed=_A ) , ):
try:
__a : str = self._pipeline.tokenizer.decode(_A , _A , _A )
return ServeDeTokenizeResult(model='''''' , text=_A )
except Exception as e:
raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(_A )} )
async def _lowerCamelCase ( self , _UpperCAmelCase=Body(_A , embed=_A ) ):
if len(_A ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__a : str = self._pipeline(_A )
return ServeForwardResult(output=_A )
except Exception as e:
raise HTTPException(500 , {'''error''': str(_A )} )
| 364
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __lowercase :
'''simple docstring'''
@staticmethod
def _lowerCamelCase ( *_UpperCAmelCase , **_UpperCAmelCase ):
pass
def __A ( a_ :Image) -> str:
__a : List[str] = hashlib.mda(image.tobytes())
return m.hexdigest()[:10]
def __A ( a_ :Image) -> Dict:
__a : Any = np.array(a_)
__a : Tuple = npimg.shape
return {"hash": hashimage(a_), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__lowerCAmelCase = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[str] = MaskGenerationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def _lowerCamelCase ( self ):
pass
@slow
@require_torch
def _lowerCamelCase ( self ):
__a : Dict = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
__a : Optional[Any] = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
__a : Optional[int] = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_9_6_7},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.9_9_3},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_9_0_9},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_8_7_9},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_8_3_4},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_7_1_6},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_6_1_2},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_5_9_9},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_5_5_2},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_5_3_2},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_5_1_6},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_4_9_9},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_4_8_3},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_4_6_4},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_4_0_8},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_3_3_5},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_3_2_6},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_2_6_2},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_9_9_9},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_6},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_4},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_3},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def _lowerCamelCase ( self ):
__a : Dict = '''facebook/sam-vit-huge'''
__a : Tuple = pipeline('''mask-generation''' , model=_UpperCAmelCase )
__a : List[Any] = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__a : Optional[int] = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1_0},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3},
] , )
| 188
| 0
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCamelCase_ (__A ):
def __get__( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict=None ) -> Any:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
UpperCAmelCase_ : Optional[int] = "__cached_" + self.fget.__name__
UpperCAmelCase_ : Dict = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if cached is None:
UpperCAmelCase_ : Union[str, Any] = self.fget(lowerCAmelCase_ )
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return cached
def snake_case ( A__ ):
UpperCAmelCase_ : Dict = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"""invalid truth value {val!r}""" )
def snake_case ( A__ ):
if is_torch_fx_proxy(A__ ):
return True
if is_torch_available():
import torch
if isinstance(A__ ,torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(A__ ,tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(A__ ,(jnp.ndarray, Tracer) ):
return True
return isinstance(A__ ,np.ndarray )
def snake_case ( A__ ):
return isinstance(A__ ,np.ndarray )
def snake_case ( A__ ):
return _is_numpy(A__ )
def snake_case ( A__ ):
import torch
return isinstance(A__ ,torch.Tensor )
def snake_case ( A__ ):
return False if not is_torch_available() else _is_torch(A__ )
def snake_case ( A__ ):
import torch
return isinstance(A__ ,torch.device )
def snake_case ( A__ ):
return False if not is_torch_available() else _is_torch_device(A__ )
def snake_case ( A__ ):
import torch
if isinstance(A__ ,A__ ):
if hasattr(A__ ,A__ ):
UpperCAmelCase_ : Tuple = getattr(A__ ,A__ )
else:
return False
return isinstance(A__ ,torch.dtype )
def snake_case ( A__ ):
return False if not is_torch_available() else _is_torch_dtype(A__ )
def snake_case ( A__ ):
import tensorflow as tf
return isinstance(A__ ,tf.Tensor )
def snake_case ( A__ ):
return False if not is_tf_available() else _is_tensorflow(A__ )
def snake_case ( A__ ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(A__ ,"is_symbolic_tensor" ):
return tf.is_symbolic_tensor(A__ )
return type(A__ ) == tf.Tensor
def snake_case ( A__ ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(A__ )
def snake_case ( A__ ):
import jax.numpy as jnp # noqa: F811
return isinstance(A__ ,jnp.ndarray )
def snake_case ( A__ ):
return False if not is_flax_available() else _is_jax(A__ )
def snake_case ( A__ ):
if isinstance(A__ ,(dict, UserDict) ):
return {k: to_py_obj(A__ ) for k, v in obj.items()}
elif isinstance(A__ ,(list, tuple) ):
return [to_py_obj(A__ ) for o in obj]
elif is_tf_tensor(A__ ):
return obj.numpy().tolist()
elif is_torch_tensor(A__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(A__ ):
return np.asarray(A__ ).tolist()
elif isinstance(A__ ,(np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def snake_case ( A__ ):
if isinstance(A__ ,(dict, UserDict) ):
return {k: to_numpy(A__ ) for k, v in obj.items()}
elif isinstance(A__ ,(list, tuple) ):
return np.array(A__ )
elif is_tf_tensor(A__ ):
return obj.numpy()
elif is_torch_tensor(A__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(A__ ):
return np.asarray(A__ )
else:
return obj
class UpperCamelCase_ (__A ):
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
UpperCAmelCase_ : List[str] = fields(self )
# Safety and consistency checks
if not len(lowerCAmelCase_ ):
raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
UpperCAmelCase_ : List[str] = getattr(self , class_fields[0].name )
UpperCAmelCase_ : Optional[int] = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowerCAmelCase_ ):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : str = first_field.items()
UpperCAmelCase_ : List[str] = True
else:
try:
UpperCAmelCase_ : List[Any] = iter(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = True
except TypeError:
UpperCAmelCase_ : Any = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowerCAmelCase_ ):
if (
not isinstance(lowerCAmelCase_ , (list, tuple) )
or not len(lowerCAmelCase_ ) == 2
or not isinstance(element[0] , lowerCAmelCase_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase_ : int = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
UpperCAmelCase_ : Any = element[1]
elif first_field is not None:
UpperCAmelCase_ : Any = first_field
else:
for field in class_fields:
UpperCAmelCase_ : Optional[int] = getattr(self , field.name )
if v is not None:
UpperCAmelCase_ : Any = v
def __delitem__( self : List[str] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : List[str] ) -> Tuple:
raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Any ) -> Union[str, Any]:
raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def _SCREAMING_SNAKE_CASE ( self : Tuple , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Dict ) -> str:
raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def _SCREAMING_SNAKE_CASE ( self : Dict , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str] ) -> List[Any]:
raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> str:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : Optional[int] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowerCAmelCase_ , lowerCAmelCase_ )
super().__setattr__(lowerCAmelCase_ , lowerCAmelCase_ )
def __setitem__( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict ) -> Optional[int]:
# Will raise a KeyException if needed
super().__setitem__(lowerCAmelCase_ , lowerCAmelCase_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class UpperCamelCase_ (__A , __A ):
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
raise ValueError(
f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class UpperCamelCase_ (__A ):
__magic_name__ = '''longest'''
__magic_name__ = '''max_length'''
__magic_name__ = '''do_not_pad'''
class UpperCamelCase_ (__A ):
__magic_name__ = '''pt'''
__magic_name__ = '''tf'''
__magic_name__ = '''np'''
__magic_name__ = '''jax'''
class UpperCamelCase_ :
def __init__( self : Any , lowerCAmelCase_ : List[ContextManager] ) -> Any:
UpperCAmelCase_ : int = context_managers
UpperCAmelCase_ : List[Any] = ExitStack()
def __enter__( self : Optional[int] ) -> int:
for context_manager in self.context_managers:
self.stack.enter_context(lowerCAmelCase_ )
def __exit__( self : Dict , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Tuple ) -> List[Any]:
self.stack.__exit__(*lowerCAmelCase_ , **lowerCAmelCase_ )
def snake_case ( A__ ):
UpperCAmelCase_ : Dict = infer_framework(A__ )
if framework == "tf":
UpperCAmelCase_ : Any = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase_ : int = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase_ : int = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def snake_case ( A__ ):
UpperCAmelCase_ : Optional[Any] = model_class.__name__
UpperCAmelCase_ : str = infer_framework(A__ )
if framework == "tf":
UpperCAmelCase_ : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase_ : Tuple = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase_ : Union[str, Any] = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def snake_case ( A__ ,A__ = "" ,A__ = "." ):
def _flatten_dict(A__ ,A__="" ,A__="." ):
for k, v in d.items():
UpperCAmelCase_ : Optional[int] = str(A__ ) + delimiter + str(A__ ) if parent_key else k
if v and isinstance(A__ ,A__ ):
yield from flatten_dict(A__ ,A__ ,delimiter=A__ ).items()
else:
yield key, v
return dict(_flatten_dict(A__ ,A__ ,A__ ) )
@contextmanager
def snake_case ( A__ ,A__ = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def snake_case ( A__ ,A__=None ):
if is_numpy_array(A__ ):
return np.transpose(A__ ,axes=A__ )
elif is_torch_tensor(A__ ):
return array.T if axes is None else array.permute(*A__ )
elif is_tf_tensor(A__ ):
import tensorflow as tf
return tf.transpose(A__ ,perm=A__ )
elif is_jax_tensor(A__ ):
return jnp.transpose(A__ ,axes=A__ )
else:
raise ValueError(F"""Type not supported for transpose: {type(A__ )}.""" )
def snake_case ( A__ ,A__ ):
if is_numpy_array(A__ ):
return np.reshape(A__ ,A__ )
elif is_torch_tensor(A__ ):
return array.reshape(*A__ )
elif is_tf_tensor(A__ ):
import tensorflow as tf
return tf.reshape(A__ ,A__ )
elif is_jax_tensor(A__ ):
return jnp.reshape(A__ ,A__ )
else:
raise ValueError(F"""Type not supported for reshape: {type(A__ )}.""" )
def snake_case ( A__ ,A__=None ):
if is_numpy_array(A__ ):
return np.squeeze(A__ ,axis=A__ )
elif is_torch_tensor(A__ ):
return array.squeeze() if axis is None else array.squeeze(dim=A__ )
elif is_tf_tensor(A__ ):
import tensorflow as tf
return tf.squeeze(A__ ,axis=A__ )
elif is_jax_tensor(A__ ):
return jnp.squeeze(A__ ,axis=A__ )
else:
raise ValueError(F"""Type not supported for squeeze: {type(A__ )}.""" )
def snake_case ( A__ ,A__ ):
if is_numpy_array(A__ ):
return np.expand_dims(A__ ,A__ )
elif is_torch_tensor(A__ ):
return array.unsqueeze(dim=A__ )
elif is_tf_tensor(A__ ):
import tensorflow as tf
return tf.expand_dims(A__ ,axis=A__ )
elif is_jax_tensor(A__ ):
return jnp.expand_dims(A__ ,axis=A__ )
else:
raise ValueError(F"""Type not supported for expand_dims: {type(A__ )}.""" )
def snake_case ( A__ ):
if is_numpy_array(A__ ):
return np.size(A__ )
elif is_torch_tensor(A__ ):
return array.numel()
elif is_tf_tensor(A__ ):
import tensorflow as tf
return tf.size(A__ )
elif is_jax_tensor(A__ ):
return array.size
else:
raise ValueError(F"""Type not supported for expand_dims: {type(A__ )}.""" )
def snake_case ( A__ ,A__ ):
for key, value in auto_map.items():
if isinstance(A__ ,(tuple, list) ):
UpperCAmelCase_ : Union[str, Any] = [F"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase_ : Dict = F"""{repo_id}--{value}"""
return auto_map
def snake_case ( A__ ):
for base_class in inspect.getmro(A__ ):
UpperCAmelCase_ : List[str] = base_class.__module__
UpperCAmelCase_ : List[str] = base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"""Could not infer framework from class {model_class}.""" )
| 268
|
"""simple docstring"""
lowerCamelCase_ = [
(1000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def snake_case ( A__ ):
UpperCAmelCase_ : List[str] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00}
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Tuple = 0
while place < len(A__ ):
if (place + 1 < len(A__ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def snake_case ( A__ ):
UpperCAmelCase_ : Union[str, Any] = []
for arabic, roman in ROMAN:
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = divmod(A__ ,A__ )
result.append(roman * factor )
if number == 0:
break
return "".join(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 268
| 1
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def lowerCAmelCase_ ( __A, __A, __A, __A=None ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = XLNetConfig.from_json_file(__A )
UpperCAmelCase__ = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
UpperCAmelCase__ = finetuning_task
UpperCAmelCase__ = GLUE_TASKS_NUM_LABELS[finetuning_task]
UpperCAmelCase__ = XLNetForSequenceClassification(__A )
elif "squad" in finetuning_task:
UpperCAmelCase__ = finetuning_task
UpperCAmelCase__ = XLNetForQuestionAnswering(__A )
else:
UpperCAmelCase__ = XLNetLMHeadModel(__A )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__A, __A, __A )
# Save pytorch-model
UpperCAmelCase__ = os.path.join(__A, __A )
UpperCAmelCase__ = os.path.join(__A, __A )
print(f"""Save PyTorch model to {os.path.abspath(__A )}""" )
torch.save(model.state_dict(), __A )
print(f"""Save configuration file to {os.path.abspath(__A )}""" )
with open(__A, "w", encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 143
|
from __future__ import annotations
def lowerCAmelCase_ ( __A, __A, __A, ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class lowercase ( _UpperCAmelCase ):
def __init__( self , *lowercase , **lowercase ) -> None:
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , lowercase , )
super().__init__(*lowercase , **lowercase )
| 46
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowercase :
def __init__( self , lowercase , ) -> Optional[int]:
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = """gelu"""
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 512
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def _snake_case ( self ) -> str:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
lowerCAmelCase = TFDistilBertModel(config=lowercase )
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase = model(lowercase )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCAmelCase = TFDistilBertForMaskedLM(config=lowercase )
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
lowerCAmelCase = TFDistilBertForQuestionAnswering(config=lowercase )
lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFDistilBertForSequenceClassification(lowercase )
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCAmelCase = self.num_choices
lowerCAmelCase = TFDistilBertForMultipleChoice(lowercase )
lowerCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFDistilBertForTokenClassification(lowercase )
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.prepare_config_and_inputs()
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) = config_and_inputs
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
_SCREAMING_SNAKE_CASE = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _snake_case ( self ) -> Dict:
lowerCAmelCase = TFDistilBertModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , dim=37 )
def _snake_case ( self ) -> str:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase )
def _snake_case ( self ) -> str:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase )
@slow
def _snake_case ( self ) -> List[str]:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowerCAmelCase = TFDistilBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_tf
class lowercase ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Any:
lowerCAmelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(lowercase )[0]
lowerCAmelCase = [1, 6, 768]
self.assertEqual(output.shape , lowercase )
lowerCAmelCase = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1e-4 )
| 46
| 1
|
"""simple docstring"""
from __future__ import annotations
import queue
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Any , _A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
snake_case_ : List[str] = data
snake_case_ : int = None
snake_case_ : Any = None
def SCREAMING_SNAKE_CASE__ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
snake_case_ : Optional[int] = input('Enter the value of the root node: ' ).strip().lower()
snake_case_ : int = queue.Queue()
snake_case_ : Union[str, Any] = TreeNode(int(__a ) )
q.put(__a )
while not q.empty():
snake_case_ : Dict = q.get()
snake_case_ : Optional[Any] = f"""Enter the left node of {node_found.data}: """
snake_case_ : Optional[int] = input(__a ).strip().lower() or 'n'
if check == "n":
return tree_node
snake_case_ : Optional[int] = TreeNode(int(__a ) )
snake_case_ : Any = left_node
q.put(__a )
snake_case_ : Optional[Any] = f"""Enter the right node of {node_found.data}: """
snake_case_ : int = input(__a ).strip().lower() or 'n'
if check == "n":
return tree_node
snake_case_ : Any = TreeNode(int(__a ) )
snake_case_ : int = right_node
q.put(__a )
raise
def SCREAMING_SNAKE_CASE__ ( __a ):
if not isinstance(__a , __a ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def SCREAMING_SNAKE_CASE__ ( __a ):
if not isinstance(__a , __a ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def SCREAMING_SNAKE_CASE__ ( __a ):
if not isinstance(__a , __a ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def SCREAMING_SNAKE_CASE__ ( __a ):
if not isinstance(__a , __a ) or not node:
return
snake_case_ : Dict = queue.Queue()
q.put(__a )
while not q.empty():
snake_case_ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def SCREAMING_SNAKE_CASE__ ( __a ):
if not isinstance(__a , __a ) or not node:
return
snake_case_ : List[Any] = queue.Queue()
q.put(__a )
while not q.empty():
snake_case_ : str = []
while not q.empty():
snake_case_ : List[Any] = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__a )
def SCREAMING_SNAKE_CASE__ ( __a ):
if not isinstance(__a , __a ) or not node:
return
snake_case_ : int = []
snake_case_ : Optional[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(__a )
snake_case_ : int = n.left
# end of while means current node doesn't have left child
snake_case_ : Union[str, Any] = stack.pop()
# start to traverse its right child
snake_case_ : str = n.right
def SCREAMING_SNAKE_CASE__ ( __a ):
if not isinstance(__a , __a ) or not node:
return
snake_case_ : Dict = []
snake_case_ : int = node
while n or stack:
while n:
stack.append(__a )
snake_case_ : str = n.left
snake_case_ : Any = stack.pop()
print(n.data , end=',' )
snake_case_ : Dict = n.right
def SCREAMING_SNAKE_CASE__ ( __a ):
if not isinstance(__a , __a ) or not node:
return
snake_case_ ,snake_case_ : List[str] = [], []
snake_case_ : List[str] = node
stacka.append(__a )
while stacka: # to find the reversed order of post order, store it in stack2
snake_case_ : Optional[Any] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__a )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def SCREAMING_SNAKE_CASE__ ( __a = "" , __a=50 , __a="*" ):
if not s:
return "\n" + width * char
snake_case_ ,snake_case_ : Optional[Any] = divmod(width - len(__a ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
_SCREAMING_SNAKE_CASE = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 355
|
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_SCREAMING_SNAKE_CASE = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_SCREAMING_SNAKE_CASE = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def UpperCAmelCase_ ( self : List[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int]=None , _A : Dict=False , _A : Dict=False , _A : Optional[Any]=False , ) -> List[str]:
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
snake_case_ : List[str] = np.array([re.sub(_A , '' , _A ) for x in predictions] )
snake_case_ : int = np.array([re.sub(_A , '' , _A ) for x in references] )
else:
snake_case_ : Optional[Any] = np.asarray(_A )
snake_case_ : Optional[Any] = np.asarray(_A )
if ignore_case:
snake_case_ : int = np.char.lower(_A )
snake_case_ : List[str] = np.char.lower(_A )
if ignore_punctuation:
snake_case_ : str = string.punctuation.maketrans('' , '' , string.punctuation )
snake_case_ : str = np.char.translate(_A , table=_A )
snake_case_ : Any = np.char.translate(_A , table=_A )
if ignore_numbers:
snake_case_ : int = string.digits.maketrans('' , '' , string.digits )
snake_case_ : Tuple = np.char.translate(_A , table=_A )
snake_case_ : Optional[Any] = np.char.translate(_A , table=_A )
snake_case_ : Optional[Any] = predictions == references
return {"exact_match": np.mean(_A ) * 100}
| 88
| 0
|
from __future__ import annotations
import numpy as np
def _lowerCamelCase( lowercase__ ) -> tuple[np.ndarray, np.ndarray]:
'''simple docstring'''
__lowercase, __lowercase= np.shape(lowercase__ )
if rows != columns:
__lowercase= (
'\'table\' has to be of square shaped array but got a '
F'{rows}x{columns} array:\n{table}'
)
raise ValueError(lowercase__ )
__lowercase= np.zeros((rows, columns) )
__lowercase= np.zeros((rows, columns) )
for i in range(lowercase__ ):
for j in range(lowercase__ ):
__lowercase= sum(lower[i][k] * upper[k][j] for k in range(lowercase__ ) )
if upper[j][j] == 0:
raise ArithmeticError('No LU decomposition exists' )
__lowercase= (table[i][j] - total) / upper[j][j]
__lowercase= 1
for j in range(lowercase__ , lowercase__ ):
__lowercase= sum(lower[i][k] * upper[k][j] for k in range(lowercase__ ) )
__lowercase= table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def _lowerCamelCase( lowercase__=None , lowercase__=None ) -> Dict:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=lowercase__ )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''The csv file to plot.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
UpperCamelCase_ : Optional[List[str]] =list_field(
default=A_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
try:
int(lowercase__ )
return True
except ValueError:
return False
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
try:
float(lowercase__ )
return True
except ValueError:
return False
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= args
__lowercase= defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
__lowercase= csv.DictReader(lowerCAmelCase )
for row in reader:
__lowercase= row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
__lowercase= int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
__lowercase= float(row['result'] )
def _A (self ):
__lowercase, __lowercase= plt.subplots()
__lowercase= 'Time usage' if self.args.is_time else 'Memory usage'
__lowercase= title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__lowercase= sorted(set(self.result_dict[model_name]['bsz'] ) )
__lowercase= sorted(set(self.result_dict[model_name]['seq_len'] ) )
__lowercase= self.result_dict[model_name]['result']
((__lowercase), (__lowercase))= (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__lowercase= (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__lowercase= np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase , )
else:
__lowercase= np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((__lowercase), (__lowercase))= (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
__lowercase= np.asarray(lowerCAmelCase , lowerCAmelCase )[: len(lowerCAmelCase )]
plt.scatter(
lowerCAmelCase , lowerCAmelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
plt.plot(lowerCAmelCase , lowerCAmelCase , '--' )
title_str += f' {label_model_name} vs.'
__lowercase= title_str[:-4]
__lowercase= 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(lowerCAmelCase )
plt.xlabel(lowerCAmelCase )
plt.ylabel(lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def _lowerCamelCase( ) -> str:
'''simple docstring'''
__lowercase= HfArgumentParser(lowercase__ )
__lowercase= parser.parse_args_into_dataclasses()[0]
__lowercase= Plot(args=lowercase__ )
plot.plot()
if __name__ == "__main__":
main()
| 295
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case__ : Tuple = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[Any] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
snake_case__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 364
|
"""simple docstring"""
import numpy as np
from PIL import Image
def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ):
lowerCAmelCase : Dict = np.array(_snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCAmelCase : int = 0
lowerCAmelCase : Dict = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Union[str, Any] = 0
# compute the shape of the output matrix
lowerCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
lowerCAmelCase : Dict = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
lowerCAmelCase : List[Any] = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase : int = 0
lowerCAmelCase : Tuple = 0
return updated_arr
def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int ):
lowerCAmelCase : Union[str, Any] = np.array(_snake_case )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Any = 0
lowerCAmelCase : int = 0
lowerCAmelCase : int = 0
# compute the shape of the output matrix
lowerCAmelCase : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
lowerCAmelCase : Dict = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
lowerCAmelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase : str = 0
lowerCAmelCase : List[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
snake_case__ : Optional[Any] = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 314
| 0
|
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowerCAmelCase_ (lowerCAmelCase__: Dict ):
"""simple docstring"""
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def lowerCAmelCase_ (lowerCAmelCase__: Tuple , lowerCAmelCase__: Any ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
UpperCAmelCase_: Optional[Any] = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" )
UpperCAmelCase_: str = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" )
UpperCAmelCase_: Dict = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" )
UpperCAmelCase_: str = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" )
UpperCAmelCase_: Tuple = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" )
UpperCAmelCase_: Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" )
UpperCAmelCase_: Optional[Any] = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" )
UpperCAmelCase_: int = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" )
UpperCAmelCase_: Optional[Any] = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" )
UpperCAmelCase_: Optional[Any] = key.replace("""image_encoder.module""" , """flava.image_model""" )
UpperCAmelCase_: List[str] = key.replace("""text_encoder.module""" , """flava.text_model""" )
UpperCAmelCase_: Optional[int] = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" )
UpperCAmelCase_: Union[str, Any] = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" )
UpperCAmelCase_: List[str] = key.replace("""text_projection""" , """flava.text_projection""" )
UpperCAmelCase_: int = key.replace("""image_projection""" , """flava.image_projection""" )
UpperCAmelCase_: List[Any] = value.float()
for key, value in codebook_state_dict.items():
UpperCAmelCase_: Any = value
return upgrade
@torch.no_grad()
def lowerCAmelCase_ (lowerCAmelCase__: Tuple , lowerCAmelCase__: Optional[int] , lowerCAmelCase__: List[Any] , lowerCAmelCase__: List[Any]=None ):
"""simple docstring"""
if config_path is not None:
UpperCAmelCase_: Optional[Any] = FlavaConfig.from_pretrained(lowerCAmelCase__ )
else:
UpperCAmelCase_: Optional[int] = FlavaConfig()
UpperCAmelCase_: Union[str, Any] = FlavaForPreTraining(lowerCAmelCase__ ).eval()
UpperCAmelCase_: List[str] = convert_dalle_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , save_checkpoint=lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
UpperCAmelCase_: Optional[int] = torch.load(lowerCAmelCase__ , map_location="""cpu""" )
else:
UpperCAmelCase_: int = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" )
UpperCAmelCase_: Optional[int] = upgrade_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
UpperCAmelCase_: Union[str, Any] = hf_model.state_dict()
UpperCAmelCase_: Tuple = count_parameters(lowerCAmelCase__ )
UpperCAmelCase_: List[Any] = count_parameters(lowerCAmelCase__ ) + count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
hf_model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
a : Dict = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 147
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a : List[str] = 'src/diffusers'
a : Optional[Any] = '.'
# This is to make sure the diffusers module imported is the one in the repo.
a : Dict = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
a : str = spec.loader.load_module()
def lowerCAmelCase_ (lowerCAmelCase__: Optional[int] , lowerCAmelCase__: List[str] ):
"""simple docstring"""
return line.startswith(lowerCAmelCase__ ) or len(lowerCAmelCase__ ) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""" , lowerCAmelCase__ ) is not None
def lowerCAmelCase_ (lowerCAmelCase__: str ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = object_name.split(""".""" )
UpperCAmelCase_: Tuple = 0
# First let's find the module where our object lives.
UpperCAmelCase_: Union[str, Any] = parts[i]
while i < len(lowerCAmelCase__ ) and not os.path.isfile(os.path.join(lowerCAmelCase__ , F'{module}.py' ) ):
i += 1
if i < len(lowerCAmelCase__ ):
UpperCAmelCase_: List[Any] = os.path.join(lowerCAmelCase__ , parts[i] )
if i >= len(lowerCAmelCase__ ):
raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(lowerCAmelCase__ , F'{module}.py' ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCAmelCase_: List[Any] = f.readlines()
# Now let's find the class / func in the code!
UpperCAmelCase_: Any = """"""
UpperCAmelCase_: Tuple = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCAmelCase__ ) and re.search(rF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
raise ValueError(F' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
UpperCAmelCase_: Dict = line_index
while line_index < len(lowerCAmelCase__ ) and _should_continue(lines[line_index] , lowerCAmelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCAmelCase_: Optional[int] = lines[start_index:line_index]
return "".join(lowerCAmelCase__ )
a : List[str] = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
a : Optional[int] = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
a : List[Any] = re.compile(r'<FILL\s+[^>]*>')
def lowerCAmelCase_ (lowerCAmelCase__: Dict ):
"""simple docstring"""
UpperCAmelCase_: Dict = code.split("""\n""" )
UpperCAmelCase_: Any = 0
while idx < len(lowerCAmelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCAmelCase__ ):
return re.search(r"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def lowerCAmelCase_ (lowerCAmelCase__: Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase_: str = len(get_indent(lowerCAmelCase__ ) ) > 0
if has_indent:
UpperCAmelCase_: Union[str, Any] = F'class Bla:\n{code}'
UpperCAmelCase_: int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=lowerCAmelCase__ )
UpperCAmelCase_: int = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_: List[Any] = style_docstrings_in_code(lowerCAmelCase__ )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def lowerCAmelCase_ (lowerCAmelCase__: Tuple , lowerCAmelCase__: int=False ):
"""simple docstring"""
with open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCAmelCase_: List[str] = f.readlines()
UpperCAmelCase_: List[str] = []
UpperCAmelCase_: Tuple = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCAmelCase__ ):
UpperCAmelCase_: Dict = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Any = search.groups()
UpperCAmelCase_: str = find_code_in_diffusers(lowerCAmelCase__ )
UpperCAmelCase_: int = get_indent(lowerCAmelCase__ )
UpperCAmelCase_: Dict = line_index + 1 if indent == theoretical_indent else line_index + 2
UpperCAmelCase_: Tuple = theoretical_indent
UpperCAmelCase_: Dict = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
UpperCAmelCase_: Tuple = True
while line_index < len(lowerCAmelCase__ ) and should_continue:
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
break
UpperCAmelCase_: Any = lines[line_index]
UpperCAmelCase_: Tuple = _should_continue(lowerCAmelCase__ , lowerCAmelCase__ ) and re.search(F'^{indent}# End copy' , lowerCAmelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCAmelCase_: int = lines[start_index:line_index]
UpperCAmelCase_: Union[str, Any] = """""".join(lowerCAmelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
UpperCAmelCase_: int = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(lowerCAmelCase__ ) is None]
UpperCAmelCase_: Union[str, Any] = """\n""".join(lowerCAmelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_: Any = replace_pattern.replace("""with""" , """""" ).split(""",""" )
UpperCAmelCase_: int = [_re_replace_pattern.search(lowerCAmelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: str = pattern.groups()
UpperCAmelCase_: int = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if option.strip() == "all-casing":
UpperCAmelCase_: List[Any] = re.sub(obja.lower() , obja.lower() , lowerCAmelCase__ )
UpperCAmelCase_: Optional[int] = re.sub(obja.upper() , obja.upper() , lowerCAmelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
UpperCAmelCase_: Union[str, Any] = blackify(lines[start_index - 1] + theoretical_code )
UpperCAmelCase_: Dict = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
UpperCAmelCase_: str = lines[:start_index] + [theoretical_code] + lines[line_index:]
UpperCAmelCase_: Optional[int] = start_index + 1
if overwrite and len(lowerCAmelCase__ ) > 0:
# Warn the user a file has been modified.
print(F'Detected changes, rewriting {filename}.' )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lowerCAmelCase__ )
return diffs
def lowerCAmelCase_ (lowerCAmelCase__: bool = False ):
"""simple docstring"""
UpperCAmelCase_: Dict = glob.glob(os.path.join(lowerCAmelCase__ , """**/*.py""" ) , recursive=lowerCAmelCase__ )
UpperCAmelCase_: Optional[Any] = []
for filename in all_files:
UpperCAmelCase_: str = is_copy_consistent(lowerCAmelCase__ , lowerCAmelCase__ )
diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_: Dict = """\n""".join(lowerCAmelCase__ )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
a : List[Any] = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 147
| 1
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
pass
| 363
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ = 1_60_00 ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = int(round(sample_rate * max_length ) )
if len(snake_case__ ) <= sample_length:
return wav
_SCREAMING_SNAKE_CASE = randint(0 ,len(snake_case__ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class __UpperCAmelCase :
__snake_case : Optional[str] = field(default=_UpperCAmelCase ,metadata={"help": "Name of a dataset from the datasets package"} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "A file containing the training audio paths and labels."} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "A file containing the validation audio paths and labels."} )
__snake_case : str = field(
default="train" ,metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} ,)
__snake_case : str = field(
default="validation" ,metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} ,)
__snake_case : str = field(
default="audio" ,metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} ,)
__snake_case : str = field(
default="label" ,metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
__snake_case : Optional[int] = field(
default=_UpperCAmelCase ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
__snake_case : Optional[int] = field(
default=_UpperCAmelCase ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
__snake_case : float = field(
default=20 ,metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} ,)
@dataclass
class __UpperCAmelCase :
__snake_case : str = field(
default="facebook/wav2vec2-base" ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ,)
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
__snake_case : str = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Name or path of preprocessor config."} )
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
__snake_case : Optional[bool] = field(
default=_UpperCAmelCase ,metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} ,)
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , UpperCAmelCase_ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" ,snake_case__ ,snake_case__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
_SCREAMING_SNAKE_CASE = DatasetDict()
_SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.train_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
_SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.eval_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"""Make sure to set `--label_column_name` to the correct text column - one of """
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path ,return_attention_mask=model_args.attention_mask ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_SCREAMING_SNAKE_CASE = raw_datasets.cast_column(
data_args.audio_column_name ,datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_SCREAMING_SNAKE_CASE = feature_extractor.model_input_names[0]
def train_transforms(snake_case__ ):
_SCREAMING_SNAKE_CASE = []
for audio in batch[data_args.audio_column_name]:
_SCREAMING_SNAKE_CASE = random_subsample(
audio["""array"""] ,max_length=data_args.max_length_seconds ,sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(snake_case__ )
_SCREAMING_SNAKE_CASE = feature_extractor(snake_case__ ,sampling_rate=feature_extractor.sampling_rate )
_SCREAMING_SNAKE_CASE = {model_input_name: inputs.get(snake_case__ )}
_SCREAMING_SNAKE_CASE = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(snake_case__ ):
_SCREAMING_SNAKE_CASE = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
_SCREAMING_SNAKE_CASE = feature_extractor(snake_case__ ,sampling_rate=feature_extractor.sampling_rate )
_SCREAMING_SNAKE_CASE = {model_input_name: inputs.get(snake_case__ )}
_SCREAMING_SNAKE_CASE = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features[data_args.label_column_name].names
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = {}, {}
for i, label in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE = str(snake_case__ )
_SCREAMING_SNAKE_CASE = label
# Load the accuracy metric from the datasets package
_SCREAMING_SNAKE_CASE = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(snake_case__ ):
_SCREAMING_SNAKE_CASE = np.argmax(eval_pred.predictions ,axis=1 )
return metric.compute(predictions=snake_case__ ,references=eval_pred.label_ids )
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path ,num_labels=len(snake_case__ ) ,labelaid=snake_case__ ,idalabel=snake_case__ ,finetuning_task="""audio-classification""" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
_SCREAMING_SNAKE_CASE = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=snake_case__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_SCREAMING_SNAKE_CASE = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case__ ,output_all_columns=snake_case__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_SCREAMING_SNAKE_CASE = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case__ ,output_all_columns=snake_case__ )
# Initialize our trainer
_SCREAMING_SNAKE_CASE = Trainer(
model=snake_case__ ,args=snake_case__ ,train_dataset=raw_datasets["""train"""] if training_args.do_train else None ,eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None ,compute_metrics=snake_case__ ,tokenizer=snake_case__ ,)
# Training
if training_args.do_train:
_SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
_SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_SCREAMING_SNAKE_CASE = last_checkpoint
_SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics("""train""" ,train_result.metrics )
trainer.save_metrics("""train""" ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics("""eval""" ,snake_case__ )
trainer.save_metrics("""eval""" ,snake_case__ )
# Write model card and (optionally) push to hub
_SCREAMING_SNAKE_CASE = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
if __name__ == "__main__":
main()
| 125
| 0
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : str = "https://openaipublic.azureedge.net/jukebox/models/"
__UpperCamelCase : Tuple = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def __A ( __lowerCamelCase ) -> List[str]:
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
a = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
a = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
a = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
a = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
a = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
a = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
a = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
a = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
a = {}
import re
a = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
a = re.compile(
R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
a = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
a = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
a = re.compile(
R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
a = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
a = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
a = re.compile(
R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
a = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__lowerCamelCase ):
a = re_encoder_block_conv_in.match(__lowerCamelCase )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] )
a = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
a = re_encoder_block_conv_in.sub(__lowerCamelCase , __lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(__lowerCamelCase ):
a = re_encoder_block_resnet.match(__lowerCamelCase )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] )
a = {"""1""": 1, """3""": 2}[groups[-2]]
a = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
a = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
a = prefix + resnet_block
a = re_encoder_block_resnet.sub(__lowerCamelCase , __lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(__lowerCamelCase ):
a = re_encoder_block_proj_out.match(__lowerCamelCase )
a = regex_match.groups()
a = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
a = re_encoder_block_proj_out.sub(__lowerCamelCase , __lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__lowerCamelCase ):
a = re_decoder_block_conv_out.match(__lowerCamelCase )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] ) - 2
a = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
a = re_decoder_block_conv_out.sub(__lowerCamelCase , __lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(__lowerCamelCase ):
a = re_decoder_block_resnet.match(__lowerCamelCase )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] ) - 2
a = {"""1""": 1, """3""": 2}[groups[-2]]
a = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
a = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
a = prefix + resnet_block
a = re_decoder_block_resnet.sub(__lowerCamelCase , __lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(__lowerCamelCase ):
a = re_decoder_block_proj_in.match(__lowerCamelCase )
a = regex_match.groups()
a = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
a = re_decoder_block_proj_in.sub(__lowerCamelCase , __lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__lowerCamelCase ):
a = re_prior_cond_conv_out.match(__lowerCamelCase )
a = regex_match.groups()
a = int(groups[1] ) * 2 + int(groups[2] ) - 2
a = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
a = re_prior_cond_conv_out.sub(__lowerCamelCase , __lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(__lowerCamelCase ):
a = re_prior_cond_resnet.match(__lowerCamelCase )
a = regex_match.groups()
a = int(groups[1] ) * 2 + int(groups[2] ) - 2
a = {"""1""": 1, """3""": 2}[groups[-2]]
a = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
a = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
a = prefix + resnet_block
a = re_prior_cond_resnet.sub(__lowerCamelCase , __lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(__lowerCamelCase ):
a = re_prior_cond_proj_in.match(__lowerCamelCase )
a = regex_match.groups()
a = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
a = re_prior_cond_proj_in.sub(__lowerCamelCase , __lowerCamelCase )
# keep original key
else:
a = original_key
a = replace_key(__lowerCamelCase )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
a = model_state_dict[f'{key_prefix}.{key}']
print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
a = original_key
a = original_key
a = value
return new_dict
@torch.no_grad()
def __A ( __lowerCamelCase=None , __lowerCamelCase=None ) -> Tuple:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
a = requests.get(f'{PREFIX}{file}' , allow_redirects=__lowerCamelCase )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=__lowerCamelCase )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , """wb""" ).write(r.content )
a = MODEL_MAPPING[model_name.split("""/""" )[-1]]
a = JukeboxConfig.from_pretrained(__lowerCamelCase )
a = JukeboxModel(__lowerCamelCase )
a = []
a = {}
for i, dict_name in enumerate(__lowerCamelCase ):
a = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["""model"""]
a = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
a = old_dic[k]
elif k.endswith(""".w""" ):
a = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
a = old_dic[k]
else:
a = old_dic[k]
a = """vqvae""" if i == 0 else f'priors.{3 - i}'
a = fix_jukebox_keys(__lowerCamelCase , model.state_dict() , __lowerCamelCase , __lowerCamelCase )
weight_dict.append(__lowerCamelCase )
a = weight_dict.pop(0 )
model.vqvae.load_state_dict(__lowerCamelCase )
for i in range(len(__lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
with open(f'{pytorch_dump_folder_path}/mapping.json' , """w""" ) as txtfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
return weight_dict
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
__UpperCamelCase : int = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 228
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__UpperCamelCase : Optional[Any] = False
@skip_mps
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = StableDiffusionAttendAndExcitePipeline
UpperCamelCase__ = False
UpperCamelCase__ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCamelCase__ ( cls :Union[str, Any] ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(__magic_name__ )
@classmethod
def lowerCamelCase__ ( cls :Union[str, Any] ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__magic_name__ , )
a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
a = CLIPTextModel(__magic_name__ )
a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
a = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self :Dict , __magic_name__ :Optional[int] , __magic_name__ :Optional[int]=0 ):
'''simple docstring'''
if str(__magic_name__ ).startswith("""mps""" ):
a = torch.manual_seed(__magic_name__ )
else:
a = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
a = a = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = """cpu"""
a = self.get_dummy_components()
a = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
a = self.get_dummy_inputs(__magic_name__ )
a = pipe(**__magic_name__ ).images
a = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
a = np.array(
[0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__magic_name__ , 1E-3 )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@classmethod
def lowerCamelCase__ ( cls :int ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(__magic_name__ )
@classmethod
def lowerCamelCase__ ( cls :Any ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(__magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = torch.manual_seed(51 )
a = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__magic_name__ , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
a = """a painting of an elephant with glasses"""
a = [5, 7]
a = pipe(
prompt=__magic_name__ , token_indices=__magic_name__ , guidance_scale=7.5 , generator=__magic_name__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
a = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5E-1
| 228
| 1
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self : Any , __lowerCamelCase : int , __lowerCamelCase : List[Any]=14 , __lowerCamelCase : Tuple=7 , __lowerCamelCase : Dict=True , __lowerCamelCase : str=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Any=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : int=5 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : Union[str, Any]=37 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Union[str, Any]=512 , __lowerCamelCase : Any=16 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Union[str, Any]=None , ):
UpperCamelCase :Optional[int] = parent
UpperCamelCase :Tuple = batch_size
UpperCamelCase :List[str] = seq_length
UpperCamelCase :Union[str, Any] = is_training
UpperCamelCase :Dict = use_token_type_ids
UpperCamelCase :Optional[int] = use_input_mask
UpperCamelCase :Tuple = use_labels
UpperCamelCase :str = use_mc_token_ids
UpperCamelCase :Union[str, Any] = vocab_size
UpperCamelCase :Optional[int] = hidden_size
UpperCamelCase :Union[str, Any] = num_hidden_layers
UpperCamelCase :Optional[int] = num_attention_heads
UpperCamelCase :int = intermediate_size
UpperCamelCase :Tuple = hidden_act
UpperCamelCase :Optional[int] = hidden_dropout_prob
UpperCamelCase :Tuple = attention_probs_dropout_prob
UpperCamelCase :str = max_position_embeddings
UpperCamelCase :str = type_vocab_size
UpperCamelCase :List[str] = type_sequence_label_size
UpperCamelCase :List[Any] = initializer_range
UpperCamelCase :Any = num_labels
UpperCamelCase :Optional[Any] = num_choices
UpperCamelCase :Optional[Any] = scope
UpperCamelCase :List[Any] = self.vocab_size - 1
def _A ( self : Optional[int] ):
UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase :str = None
if self.use_input_mask:
UpperCamelCase :List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase :str = None
if self.use_token_type_ids:
UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase :Tuple = None
if self.use_mc_token_ids:
UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
UpperCamelCase :Tuple = None
UpperCamelCase :Dict = None
UpperCamelCase :Dict = None
if self.use_labels:
UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase :List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase :Tuple = self.get_config()
UpperCamelCase :List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _A ( self : Dict ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _A ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Dict ):
UpperCamelCase :Optional[int] = CTRLModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase )
model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
UpperCamelCase :Optional[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _A ( self : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ):
UpperCamelCase :Union[str, Any] = CTRLLMHeadModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :List[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self : int ):
UpperCamelCase :Optional[Any] = self.prepare_config_and_inputs()
(
UpperCamelCase
) :Any = config_and_inputs
UpperCamelCase :str = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def _A ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , *__lowerCamelCase : Any ):
UpperCamelCase :Any = self.num_labels
UpperCamelCase :Tuple = CTRLForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :int = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
snake_case__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
snake_case__ : Union[str, Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
snake_case__ : Tuple = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Optional[Any] = True
snake_case__ : Optional[Any] = False
snake_case__ : Optional[Any] = False
def _A ( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _A ( self : Optional[int] ):
UpperCamelCase :Tuple = CTRLModelTester(self )
UpperCamelCase :Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 )
def _A ( self : List[str] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _A ( self : List[Any] ):
self.config_tester.run_common_tests()
def _A ( self : int ):
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__lowerCamelCase )
def _A ( self : str ):
UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _A ( self : List[Any] ):
pass
@slow
def _A ( self : List[str] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :List[str] = CTRLModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip("""The model doesn\'t support left padding""" ) # and it's not used enough to be worth fixing :)
def _A ( self : Tuple ):
pass
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : str ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _A ( self : Optional[int] ):
UpperCamelCase :Union[str, Any] = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(__lowerCamelCase )
UpperCamelCase :Tuple = torch.tensor(
[[11_859, 0, 1_611, 8]] , dtype=torch.long , device=__lowerCamelCase ) # Legal the president is
UpperCamelCase :List[str] = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCamelCase :Optional[Any] = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase )
self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
| 351
|
from __future__ import annotations
from PIL import Image
# Define glider example
UpperCAmelCase_ : Optional[Any] = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
UpperCAmelCase_ : List[Any] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
UpperCamelCase :List[str] = []
for i in range(len(__magic_name__ ) ):
UpperCamelCase :Optional[int] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
UpperCamelCase :Tuple = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__magic_name__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__magic_name__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__magic_name__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
UpperCamelCase :str = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__magic_name__ )
return next_generation
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[list[int]] , __magic_name__ : int ) -> list[Image.Image]:
"""simple docstring"""
UpperCamelCase :int = []
for _ in range(__magic_name__ ):
# Create output image
UpperCamelCase :int = Image.new("""RGB""" , (len(cells[0] ), len(__magic_name__ )) )
UpperCamelCase :Tuple = img.load()
# Save cells to image
for x in range(len(__magic_name__ ) ):
for y in range(len(cells[0] ) ):
UpperCamelCase :Union[str, Any] = 255 - cells[y][x] * 255
UpperCamelCase :int = (colour, colour, colour)
# Save image
images.append(__magic_name__ )
UpperCamelCase :Any = new_generation(__magic_name__ )
return images
if __name__ == "__main__":
UpperCAmelCase_ : Any = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 62
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE : Tuple = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = ["""BeitFeatureExtractor"""]
SCREAMING_SNAKE_CASE : Optional[Any] = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=32 , a_=3 , a_=4 , a_=[10, 20, 30, 40] , a_=[2, 2, 3, 2] , a_=True , a_=True , a_=37 , a_="gelu" , a_=10 , a_=0.02 , a_=["stage2", "stage3", "stage4"] , a_=[2, 3, 4] , a_=None , ):
'''simple docstring'''
__snake_case : List[str] = parent
__snake_case : str = batch_size
__snake_case : List[Any] = image_size
__snake_case : List[Any] = num_channels
__snake_case : str = num_stages
__snake_case : Any = hidden_sizes
__snake_case : Optional[int] = depths
__snake_case : Dict = is_training
__snake_case : Tuple = use_labels
__snake_case : str = intermediate_size
__snake_case : Optional[int] = hidden_act
__snake_case : Dict = num_labels
__snake_case : Tuple = initializer_range
__snake_case : Dict = out_features
__snake_case : Optional[int] = out_indices
__snake_case : str = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
if self.use_labels:
__snake_case : Any = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Dict = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Tuple = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case : str = None
__snake_case : Optional[Any] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Tuple = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Union[str, Any] = config_and_inputs
__snake_case : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Union[str, Any] = config_and_inputs
__snake_case : Optional[int] = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ConvNextVaModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
__snake_case : int = True
if model_class.__name__ in [
*get_values(a_ ),
*get_values(a_ ),
]:
continue
__snake_case : Dict = model_class(a_ )
model.to(a_ )
model.train()
__snake_case : Tuple = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case : List[str] = model(**a_ ).loss
loss.backward()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
__snake_case : Optional[Any] = False
__snake_case : Tuple = True
if (
model_class.__name__
in [*get_values(a_ ), *get_values(a_ )]
or not model_class.supports_gradient_checkpointing
):
continue
__snake_case : Union[str, Any] = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case : str = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case : Union[str, Any] = model(**a_ ).loss
loss.backward()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = model_class(a_ )
__snake_case : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : int = [*signature.parameters.keys()]
__snake_case : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
def check_hidden_states_output(a_ , a_ , a_ ):
__snake_case : Tuple = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case : Union[str, Any] = model(**self._prepare_for_class(a_ , a_ ) )
__snake_case : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : List[Any] = self.model_tester.num_stages
self.assertEqual(len(a_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = True
check_hidden_states_output(a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
check_hidden_states_output(a_ , a_ , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : str = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def lowercase ( ) ->Any:
"""simple docstring"""
__snake_case : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(a_ )
__snake_case : Dict = self.default_image_processor
__snake_case : Tuple = prepare_img()
__snake_case : Tuple = preprocessor(images=a_ , return_tensors='''pt''' ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case : Union[str, Any] = model(**a_ )
# verify the logits
__snake_case : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , a_ )
__snake_case : Any = torch.tensor([0.9996, 0.1966, -0.4386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
| 102
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCAmelCase( lowerCamelCase_ ):
"""simple docstring"""
__lowerCamelCase = (PNDMScheduler,)
__lowerCamelCase = (('''num_inference_steps''', 50),)
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**__snake_case )
return config
def UpperCAmelCase_ ( self , snake_case__=0 , **snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= dict(self.forward_default_kwargs )
lowercase__ : Optional[int]= kwargs.pop("num_inference_steps" , __snake_case )
lowercase__ : Union[str, Any]= self.dummy_sample
lowercase__ : Optional[int]= 0.1 * sample
lowercase__ : List[Any]= [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ : Optional[int]= self.get_scheduler_config(**__snake_case )
lowercase__ : Optional[int]= scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
lowercase__ : Union[str, Any]= dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
lowercase__ : Dict= scheduler_class.from_pretrained(__snake_case )
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
lowercase__ : Optional[int]= dummy_past_residuals[:]
lowercase__ : List[str]= scheduler.step_prk(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
lowercase__ : Optional[int]= new_scheduler.step_prk(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowercase__ : Tuple= scheduler.step_plms(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
lowercase__ : int= new_scheduler.step_plms(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self , snake_case__=0 , **snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= dict(self.forward_default_kwargs )
lowercase__ : Any= kwargs.pop("num_inference_steps" , __snake_case )
lowercase__ : str= self.dummy_sample
lowercase__ : Any= 0.1 * sample
lowercase__ : Any= [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ : Tuple= self.get_scheduler_config()
lowercase__ : Tuple= scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ : str= dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
lowercase__ : Union[str, Any]= scheduler_class.from_pretrained(__snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ : Dict= dummy_past_residuals[:]
lowercase__ : Union[str, Any]= scheduler.step_prk(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
lowercase__ : Any= new_scheduler.step_prk(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowercase__ : int= scheduler.step_plms(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
lowercase__ : Optional[int]= new_scheduler.step_plms(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
lowercase__ : Dict= self.scheduler_classes[0]
lowercase__ : Dict= self.get_scheduler_config(**__snake_case )
lowercase__ : Optional[int]= scheduler_class(**__snake_case )
lowercase__ : str= 10
lowercase__ : Union[str, Any]= self.dummy_model()
lowercase__ : Optional[Any]= self.dummy_sample_deter
scheduler.set_timesteps(__snake_case )
for i, t in enumerate(scheduler.prk_timesteps ):
lowercase__ : int= model(__snake_case , __snake_case )
lowercase__ : Any= scheduler.step_prk(__snake_case , __snake_case , __snake_case ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
lowercase__ : Tuple= model(__snake_case , __snake_case )
lowercase__ : Any= scheduler.step_plms(__snake_case , __snake_case , __snake_case ).prev_sample
return sample
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= dict(self.forward_default_kwargs )
lowercase__ : List[Any]= kwargs.pop("num_inference_steps" , __snake_case )
for scheduler_class in self.scheduler_classes:
lowercase__ : Dict= self.get_scheduler_config()
lowercase__ : Any= scheduler_class(**__snake_case )
lowercase__ : Any= self.dummy_sample
lowercase__ : Tuple= 0.1 * sample
if num_inference_steps is not None and hasattr(__snake_case , "set_timesteps" ):
scheduler.set_timesteps(__snake_case )
elif num_inference_steps is not None and not hasattr(__snake_case , "set_timesteps" ):
lowercase__ : Any= num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ : Any= [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ : Tuple= dummy_past_residuals[:]
lowercase__ : str= scheduler.step_prk(__snake_case , 0 , __snake_case , **__snake_case ).prev_sample
lowercase__ : Any= scheduler.step_prk(__snake_case , 1 , __snake_case , **__snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowercase__ : Optional[Any]= scheduler.step_plms(__snake_case , 0 , __snake_case , **__snake_case ).prev_sample
lowercase__ : str= scheduler.step_plms(__snake_case , 1 , __snake_case , **__snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__snake_case )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__snake_case )
lowercase__ : str= self.scheduler_classes[0]
lowercase__ : str= self.get_scheduler_config(steps_offset=1 )
lowercase__ : Optional[int]= scheduler_class(**__snake_case )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=__snake_case , beta_end=__snake_case )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__snake_case )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__snake_case )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=__snake_case )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__snake_case )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
lowercase__ : str= 27
for scheduler_class in self.scheduler_classes:
lowercase__ : Optional[int]= self.dummy_sample
lowercase__ : Tuple= 0.1 * sample
lowercase__ : Any= self.get_scheduler_config()
lowercase__ : Optional[int]= scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
lowercase__ : List[Any]= scheduler.step_prk(__snake_case , __snake_case , __snake_case ).prev_sample
def UpperCAmelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(__snake_case ):
lowercase__ : Optional[int]= self.scheduler_classes[0]
lowercase__ : Optional[Any]= self.get_scheduler_config()
lowercase__ : List[Any]= scheduler_class(**__snake_case )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.full_loop()
lowercase__ : Tuple= torch.sum(torch.abs(__snake_case ) )
lowercase__ : Any= torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.25_80 ) < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.full_loop(prediction_type="v_prediction" )
lowercase__ : List[Any]= torch.sum(torch.abs(__snake_case ) )
lowercase__ : List[str]= torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 67.39_86 ) < 1e-2
assert abs(result_mean.item() - 0.08_78 ) < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# We specify different beta, so that the first alpha is 0.99
lowercase__ : Optional[Any]= self.full_loop(set_alpha_to_one=__snake_case , beta_start=0.01 )
lowercase__ : Any= torch.sum(torch.abs(__snake_case ) )
lowercase__ : Dict= torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.29_95 ) < 1e-3
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# We specify different beta, so that the first alpha is 0.99
lowercase__ : Optional[Any]= self.full_loop(set_alpha_to_one=__snake_case , beta_start=0.01 )
lowercase__ : Dict= torch.sum(torch.abs(__snake_case ) )
lowercase__ : List[Any]= torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.24_34 ) < 1e-3
| 371
|
"""simple docstring"""
from __future__ import annotations
def lowercase__(A , A ) ->list[str]:
"""simple docstring"""
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ : List[str]= number_of_bytes // partitions
lowercase__ : Dict= []
for i in range(A ):
lowercase__ : Union[str, Any]= i * bytes_per_partition + 1
lowercase__ : Any= (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 150
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A ( __UpperCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class A ( unittest.TestCase ):
@property
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__ = ort.SessionOptions()
lowercase__ = False
return options
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
lowercase__ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = """A red cat sitting on a park bench"""
lowercase__ = np.random.RandomState(0 )
lowercase__ = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type="""np""" , )
lowercase__ = output.images
lowercase__ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.25_14, 0.30_07, 0.35_17, 0.17_90, 0.23_82, 0.31_67, 0.19_44, 0.22_73, 0.24_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
lowercase__ = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
lowercase__ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = """A red cat sitting on a park bench"""
lowercase__ = np.random.RandomState(0 )
lowercase__ = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type="""np""" , )
lowercase__ = output.images
lowercase__ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.00_86, 0.00_77, 0.00_83, 0.00_93, 0.01_07, 0.01_39, 0.00_94, 0.00_97, 0.01_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 164
|
'''simple docstring'''
def _A ( lowercase__ = 1000000 ):
lowercase__ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , lowercase__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 164
| 1
|
'''simple docstring'''
import os
def lowerCamelCase__ ( ):
with open(os.path.dirname(_A ) + '/grid.txt' ) as f:
a : Any = [] # noqa: E741
for _ in range(20 ):
l.append([int(_A ) for x in f.readline().split()] )
a : Dict = 0
# right
for i in range(20 ):
for j in range(17 ):
a : Optional[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
a : Optional[int] = temp
# down
for i in range(17 ):
for j in range(20 ):
a : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
a : Union[str, Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
a : Dict = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
a : str = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
a : Dict = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
a : Union[str, Any] = temp
return maximum
if __name__ == "__main__":
print(solution())
| 363
|
'''simple docstring'''
lowerCAmelCase: Union[str, Any] = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
lowerCAmelCase: Optional[Any] = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 1_2,
'Pm': 1_5,
'Em': 1_8,
'Zm': 2_1,
'Ym': 2_4,
}
def lowerCamelCase__ ( _A , _A , _A ):
a : Optional[int] = from_type.lower().strip('s' )
a : Optional[Any] = to_type.lower().strip('s' )
a : Dict = UNIT_SYMBOL.get(_A , _A )
a : Optional[Any] = UNIT_SYMBOL.get(_A , _A )
if from_sanitized not in METRIC_CONVERSION:
a : Optional[Any] = (
f"""Invalid 'from_type' value: {from_type!r}.\n"""
f"""Conversion abbreviations are: {", ".join(_A )}"""
)
raise ValueError(_A )
if to_sanitized not in METRIC_CONVERSION:
a : Union[str, Any] = (
f"""Invalid 'to_type' value: {to_type!r}.\n"""
f"""Conversion abbreviations are: {", ".join(_A )}"""
)
raise ValueError(_A )
a : List[Any] = METRIC_CONVERSION[from_sanitized]
a : int = METRIC_CONVERSION[to_sanitized]
a : Tuple = 1
if from_exponent > to_exponent:
a : Optional[int] = from_exponent - to_exponent
else:
a : Optional[Any] = -(to_exponent - from_exponent)
return value * pow(10 , _A )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 96
| 0
|
'''simple docstring'''
def __lowerCamelCase ( A__ , A__ , A__ ) -> Any:
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(A__ , n - 1 , A__ ) * a) % mod
else:
UpperCamelCase = binary_exponentiation(A__ , n / 2 , A__ )
return (b * b) % mod
# a prime number
_lowerCamelCase : Dict = 701
_lowerCamelCase : Dict = 10_0000_0000
_lowerCamelCase : Tuple = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 28
|
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :Any = len(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = len(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
UpperCamelCase :List[str] = True
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
UpperCamelCase :List[Any] = True
if a[i].islower():
UpperCamelCase :List[Any] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : List[str] =ConsistencyModelPipeline
a : Optional[int] =UNCONDITIONAL_IMAGE_GENERATION_PARAMS
a : Dict =UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
a : Union[str, Any] =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def lowercase__ ( self , snake_case__=False ):
"""simple docstring"""
if class_cond:
lowerCAmelCase : List[str] = self.dummy_cond_unet
else:
lowerCAmelCase : List[Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
lowerCAmelCase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
lowerCAmelCase : List[str] = {
"unet": unet,
"scheduler": scheduler,
}
return components
def lowercase__ ( self , snake_case__ , snake_case__=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith("mps" ):
lowerCAmelCase : Dict = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : List[Any] = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Tuple = self.get_dummy_components()
lowerCAmelCase : Dict = ConsistencyModelPipeline(**snake_case__ )
lowerCAmelCase : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : List[str] = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Dict = pipe(**snake_case__ ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase : Tuple = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Any = self.get_dummy_components(class_cond=snake_case__ )
lowerCAmelCase : Any = ConsistencyModelPipeline(**snake_case__ )
lowerCAmelCase : Union[str, Any] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : str = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : int = pipe(**snake_case__ ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase : Union[str, Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : str = self.get_dummy_components()
lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(**snake_case__ )
lowerCAmelCase : Optional[Any] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : List[Any] = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Dict = 1
lowerCAmelCase : Dict = None
lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase : Union[str, Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Union[str, Any] = self.get_dummy_components(class_cond=snake_case__ )
lowerCAmelCase : Optional[int] = ConsistencyModelPipeline(**snake_case__ )
lowerCAmelCase : int = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Dict = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : Dict = None
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Optional[int] = pipe(**snake_case__ ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase : str = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self , snake_case__=0 , snake_case__=False , snake_case__="cpu" , snake_case__=torch.floataa , snake_case__=(1, 3, 64, 64) ):
"""simple docstring"""
lowerCAmelCase : Tuple = torch.manual_seed(snake_case__ )
lowerCAmelCase : Dict = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
lowerCAmelCase : int = self.get_fixed_latents(seed=snake_case__ , device=snake_case__ , dtype=snake_case__ , shape=snake_case__ )
lowerCAmelCase : Dict = latents
return inputs
def lowercase__ ( self , snake_case__=0 , snake_case__="cpu" , snake_case__=torch.floataa , snake_case__=(1, 3, 64, 64) ):
"""simple docstring"""
if type(snake_case__ ) == str:
lowerCAmelCase : Tuple = torch.device(snake_case__ )
lowerCAmelCase : Optional[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : List[str] = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
return latents
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
lowerCAmelCase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
lowerCAmelCase : Tuple = ConsistencyModelPipeline(unet=snake_case__ , scheduler=snake_case__ )
pipe.to(torch_device=snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[int] = self.get_inputs()
lowerCAmelCase : List[str] = pipe(**snake_case__ ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase : Optional[int] = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
lowerCAmelCase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(unet=snake_case__ , scheduler=snake_case__ )
pipe.to(torch_device=snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Any = self.get_inputs()
lowerCAmelCase : str = 1
lowerCAmelCase : List[str] = None
lowerCAmelCase : Tuple = pipe(**snake_case__ ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Any = image[0, -3:, -3:, -1]
lowerCAmelCase : List[Any] = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
lowerCAmelCase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
lowerCAmelCase : int = ConsistencyModelPipeline(unet=snake_case__ , scheduler=snake_case__ )
pipe.to(torch_device=snake_case__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[int] = self.get_inputs(get_fixed_latents=snake_case__ , device=snake_case__ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=snake_case__ , enable_math=snake_case__ , enable_mem_efficient=snake_case__ ):
lowerCAmelCase : Dict = pipe(**snake_case__ ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase : int = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
lowerCAmelCase : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
lowerCAmelCase : Dict = ConsistencyModelPipeline(unet=snake_case__ , scheduler=snake_case__ )
pipe.to(torch_device=snake_case__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Any = self.get_inputs(get_fixed_latents=snake_case__ , device=snake_case__ )
lowerCAmelCase : Tuple = 1
lowerCAmelCase : Tuple = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=snake_case__ , enable_math=snake_case__ , enable_mem_efficient=snake_case__ ):
lowerCAmelCase : str = pipe(**snake_case__ ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Any = image[0, -3:, -3:, -1]
lowerCAmelCase : Union[str, Any] = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 360
|
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def a__ ( SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 133
| 0
|
'''simple docstring'''
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
while second != 0:
_UpperCamelCase : str = first & second
first ^= second
_UpperCamelCase : Tuple = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ : Union[str, Any] = int(input('Enter the first number: ').strip())
snake_case_ : int = int(input('Enter the second number: ').strip())
print(F"""{add(first, second) = }""")
| 83
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : int = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Any = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
__lowerCAmelCase : str = '|'.join(sys.argv[1:])
__lowerCAmelCase : Tuple = re.compile(RF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : Union[str, Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 88
| 0
|
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(_lowerCamelCase )
if n_element < 1:
_lowerCAmelCase : Any = ValueError("a should be a positive number" )
raise my_error
_lowerCAmelCase : Dict = [1]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = (0, 0, 0)
_lowerCAmelCase : Union[str, Any] = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
_snake_case = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
_snake_case = hamming(int(n))
print("-----------------------------------------------------")
print(f'''The list with nth numbers is: {hamming_numbers}''')
print("-----------------------------------------------------")
| 300
|
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = {}
def snake_case__ ( self, __a):
'''simple docstring'''
if vertex not in self.adjacency:
_lowerCAmelCase : List[Any] = {}
self.num_vertices += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_vertex(__a)
self.add_vertex(__a)
if head == tail:
return
_lowerCAmelCase : Dict = weight
_lowerCAmelCase : Dict = weight
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
edges.remove((tail, head, weight))
for i in range(len(__a)):
_lowerCAmelCase : Optional[int] = list(edges[i])
edges.sort(key=lambda __a: e[2])
for i in range(len(__a) - 1):
if edges[i][2] >= edges[i + 1][2]:
_lowerCAmelCase : Tuple = edges[i][2] + 1
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
_lowerCAmelCase : Union[str, Any] = weight
_lowerCAmelCase : Optional[int] = weight
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCAmelCase : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def snake_case__ ( self):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def snake_case__ ( __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Graph()
if vertices is None:
_lowerCAmelCase : Any = []
if edges is None:
_lowerCAmelCase : Any = []
for vertex in vertices:
g.add_vertex(__a)
for edge in edges:
g.add_edge(*__a)
return g
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = {}
def __len__( self):
'''simple docstring'''
return len(self.parent)
def snake_case__ ( self, __a):
'''simple docstring'''
if item in self.parent:
return self.find(__a)
_lowerCAmelCase : Optional[int] = item
_lowerCAmelCase : Any = 0
return item
def snake_case__ ( self, __a):
'''simple docstring'''
if item not in self.parent:
return self.make_set(__a)
if item != self.parent[item]:
_lowerCAmelCase : Any = self.find(self.parent[item])
return self.parent[item]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.find(__a)
_lowerCAmelCase : List[str] = self.find(__a)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowerCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowerCAmelCase : List[Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowerCAmelCase : int = roota
return roota
return None
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = graph.num_vertices
_lowerCAmelCase : Optional[int] = Graph.UnionFind()
_lowerCAmelCase : str = []
while num_components > 1:
_lowerCAmelCase : List[str] = {}
for vertex in graph.get_vertices():
_lowerCAmelCase : Optional[Any] = -1
_lowerCAmelCase : Union[str, Any] = graph.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
_lowerCAmelCase : Dict = union_find.find(__a)
_lowerCAmelCase : Optional[Any] = union_find.find(__a)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Union[str, Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
if union_find.find(__a) != union_find.find(__a):
union_find.union(__a, __a)
mst_edges.append(cheap_edge[vertex])
_lowerCAmelCase : Any = num_components - 1
_lowerCAmelCase : List[str] = Graph.build(edges=__a)
return mst
| 300
| 1
|
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def UpperCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=__lowercase , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=__lowercase , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=__lowercase , help="where to store parsed gold_data_path file" , )
_UpperCAmelCase = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
_UpperCAmelCase = json.load(__lowercase )
for dpr_record in tqdm(__lowercase ):
_UpperCAmelCase = dpr_record["question"]
_UpperCAmelCase = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(__lowercase ) + "\n" )
if __name__ == "__main__":
main()
| 22
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowercase ( unittest.TestCase ):
__lowercase : Any = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCamelCase = VideoClassificationPipeline(model=A_ , image_processor=A_ , top_k=2 )
UpperCamelCase = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
for example in examples:
UpperCamelCase = video_classifier(A_ )
self.assertEqual(
A_ , [
{'score': ANY(A_ ), 'label': ANY(A_ )},
{'score': ANY(A_ ), 'label': ANY(A_ )},
] , )
@require_torch
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
UpperCamelCase = VideoMAEFeatureExtractor(
size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
UpperCamelCase = pipeline(
'video-classification' , model=A_ , feature_extractor=A_ , frame_sampling_rate=4 )
UpperCamelCase = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCamelCase = video_classifier(A_ , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}] , )
UpperCamelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
] , )
@require_tf
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
pass
| 222
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 278
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ :
def __init__( self : Optional[Any] , A__ : str , A__ : Any=13 , A__ : str=[30, 30] , A__ : int=2 , A__ : Dict=3 , A__ : str=True , A__ : Union[str, Any]=True , A__ : Any=32 , A__ : int=5 , A__ : str=4 , A__ : List[Any]=37 , A__ : Union[str, Any]="gelu" , A__ : Dict=0.1 , A__ : Dict=0.1 , A__ : Tuple=10 , A__ : Dict=0.02 , A__ : Any=3 , A__ : Union[str, Any]=None , A__ : Optional[Any]=8 , A__ : Dict=10 , ) -> Optional[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = scope
_snake_case = n_targets
_snake_case = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
_snake_case = (image_size[1] // patch_size) * (image_size[0] // patch_size)
_snake_case = num_patches + 1 + self.num_detection_tokens
def UpperCamelCase_ ( self : List[str] ) -> str:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
_snake_case = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
_snake_case = []
for i in range(self.batch_size ):
_snake_case = {}
_snake_case = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=A__ )
_snake_case = torch.rand(self.n_targets , 4 , device=A__ )
labels.append(A__ )
_snake_case = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : Dict ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def UpperCamelCase_ ( self : Any , A__ : Any , A__ : str , A__ : Tuple ) -> Dict:
_snake_case = YolosModel(config=A__ )
model.to(A__ )
model.eval()
_snake_case = model(A__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def UpperCamelCase_ ( self : Dict , A__ : List[str] , A__ : Optional[Any] , A__ : str ) -> int:
_snake_case = YolosForObjectDetection(A__ )
model.to(A__ )
model.eval()
_snake_case = model(pixel_values=A__ )
_snake_case = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
_snake_case = model(pixel_values=A__ , labels=A__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def UpperCamelCase_ ( self : Optional[Any] ) -> Tuple:
_snake_case = self.prepare_config_and_inputs()
_snake_case, _snake_case, _snake_case = config_and_inputs
_snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ ( __lowercase , __lowercase , unittest.TestCase ):
UpperCamelCase_ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
UpperCamelCase_ : int = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
UpperCamelCase_ : List[str] = False
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : Tuple = False
def UpperCamelCase_ ( self : Dict , A__ : List[Any] , A__ : List[str] , A__ : Optional[int]=False ) -> Optional[int]:
_snake_case = super()._prepare_for_class(A__ , A__ , return_labels=A__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
_snake_case = []
for i in range(self.model_tester.batch_size ):
_snake_case = {}
_snake_case = torch.ones(
size=(self.model_tester.n_targets,) , device=A__ , dtype=torch.long )
_snake_case = torch.ones(
self.model_tester.n_targets , 4 , device=A__ , dtype=torch.float )
labels.append(A__ )
_snake_case = labels
return inputs_dict
def UpperCamelCase_ ( self : List[Any] ) -> List[str]:
_snake_case = YolosModelTester(self )
_snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
def UpperCamelCase_ ( self : Optional[int] ) -> Dict:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[Any] ) -> str:
# YOLOS does not use inputs_embeds
pass
def UpperCamelCase_ ( self : Union[str, Any] ) -> List[str]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(A__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )
def UpperCamelCase_ ( self : List[Any] ) -> Optional[Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(A__ )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A__ )
def UpperCamelCase_ ( self : List[str] ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase_ ( self : Union[str, Any] ) -> int:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
# in YOLOS, the seq_len is different
_snake_case = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(A__ , A__ ) )
_snake_case = outputs.attentions
self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(A__ , A__ ) )
_snake_case = outputs.attentions
self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_snake_case = len(A__ )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(A__ , A__ ) )
_snake_case = 1
self.assertEqual(out_len + added_hidden_states , len(A__ ) )
_snake_case = outputs.attentions
self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase_ ( self : int ) -> Dict:
def check_hidden_states_output(A__ : Optional[int] , A__ : Union[str, Any] , A__ : int ):
_snake_case = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(A__ , A__ ) )
_snake_case = outputs.hidden_states
_snake_case = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A__ ) , A__ )
# YOLOS has a different seq_length
_snake_case = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(A__ , A__ , A__ )
def UpperCamelCase_ ( self : Optional[Any] ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*A__ )
@slow
def UpperCamelCase_ ( self : List[str] ) -> Dict:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = YolosModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def snake_case_() -> str:
"""simple docstring"""
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Any ) -> str:
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : Tuple ) -> str:
_snake_case = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(A__ )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=A__ , return_tensors='''pt''' ).to(A__ )
# forward pass
with torch.no_grad():
_snake_case = model(inputs.pixel_values )
# verify outputs
_snake_case = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , A__ )
_snake_case = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=A__ , )
_snake_case = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , A__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , A__ , atol=1e-4 ) )
# verify postprocessing
_snake_case = image_processor.post_process_object_detection(
A__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
_snake_case = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(A__ )
_snake_case = [75, 75, 17, 63, 17]
_snake_case = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(A__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , A__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , A__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , A__ ) )
| 278
| 1
|
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Tuple = None
UpperCamelCase : List[Any] = None
UpperCamelCase : List[str] = graph
self._normalize_graph(_lowerCamelCase, _lowerCamelCase )
UpperCamelCase : str = len(_lowerCamelCase )
UpperCamelCase : str = None
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]:
if sources is int:
UpperCamelCase : Tuple = [sources]
if sinks is int:
UpperCamelCase : List[str] = [sinks]
if len(_lowerCamelCase ) == 0 or len(_lowerCamelCase ) == 0:
return
UpperCamelCase : Optional[Any] = sources[0]
UpperCamelCase : List[str] = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_lowerCamelCase ) > 1 or len(_lowerCamelCase ) > 1:
UpperCamelCase : List[str] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
UpperCamelCase : Union[str, Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0, 0 )
self.graph.insert(0, [0] * size )
for i in sources:
UpperCamelCase : int = max_input_flow
UpperCamelCase : str = 0
UpperCamelCase : List[str] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
UpperCamelCase : Optional[Any] = max_input_flow
UpperCamelCase : str = size - 1
def snake_case_ ( self ) -> str:
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase : Union[str, Any] = algorithm(self )
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : List[Any] = flow_network
UpperCamelCase : Union[str, Any] = flow_network.verticesCount
UpperCamelCase : List[Any] = flow_network.sourceIndex
UpperCamelCase : Dict = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
UpperCamelCase : Optional[int] = flow_network.graph
UpperCamelCase : Optional[int] = False
def snake_case_ ( self ) -> Optional[int]:
if not self.executed:
self._algorithm()
UpperCamelCase : Optional[Any] = True
def snake_case_ ( self ) -> List[str]:
pass
class lowerCAmelCase_ ( __A ):
def __init__( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
super().__init__(_lowerCamelCase )
# use this to save your result
UpperCamelCase : Dict = -1
def snake_case_ ( self ) -> Any:
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class lowerCAmelCase_ ( __A ):
def __init__( self, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
super().__init__(_lowerCamelCase )
UpperCamelCase : Tuple = [[0] * self.verticies_count for i in range(self.verticies_count )]
UpperCamelCase : List[Any] = [0] * self.verticies_count
UpperCamelCase : Union[str, Any] = [0] * self.verticies_count
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : List[Any] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
UpperCamelCase : Optional[int] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
UpperCamelCase : List[Any] = 0
while i < len(_lowerCamelCase ):
UpperCamelCase : str = vertices_list[i]
UpperCamelCase : Optional[Any] = self.heights[vertex_index]
self.process_vertex(_lowerCamelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0, vertices_list.pop(_lowerCamelCase ) )
UpperCamelCase : Union[str, Any] = 0
else:
i += 1
UpperCamelCase : Any = sum(self.preflow[self.source_index] )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_lowerCamelCase, _lowerCamelCase )
self.relabel(_lowerCamelCase )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = min(
self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index], )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase : Dict = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
UpperCamelCase : List[Any] = self.heights[to_index]
if min_height is not None:
UpperCamelCase : Optional[Any] = min_height + 1
if __name__ == "__main__":
__UpperCAmelCase = [0]
__UpperCAmelCase = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__UpperCAmelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__UpperCAmelCase = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__UpperCAmelCase = flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 119
|
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''vision-encoder-decoder'''
lowerCamelCase = True
def __init__( self , **_lowerCamelCase ) -> str:
super().__init__(**_lowerCamelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"A configuraton of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
A_ : Optional[int] = kwargs.pop("""encoder""" )
A_ : List[str] = encoder_config.pop("""model_type""" )
A_ : str = kwargs.pop("""decoder""" )
A_ : Optional[Any] = decoder_config.pop("""model_type""" )
A_ : List[str] = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : str = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = True
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A_ : int = True
A_ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : List[str] = self.encoder.to_dict()
A_ : Union[str, Any] = self.decoder.to_dict()
A_ : str = self.__class__.model_type
return output
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase_ ( self ) -> float:
return 1e-4
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
A_ : Optional[Any] = OrderedDict()
A_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : Optional[int] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
import torch
A_ : Optional[int] = OrderedDict()
A_ : List[Any] = super().generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
A_ , A_ : str = dummy_input["""input_ids"""].shape
A_ : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
A_ : Union[str, Any] = dummy_input.pop("""input_ids""" )
A_ : List[str] = dummy_input.pop("""attention_mask""" )
A_ : Optional[int] = torch.zeros(_lowerCamelCase )
return common_inputs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> None:
pass
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "default" ) -> OnnxConfig:
A_ : List[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
| 344
| 0
|
def lowerCamelCase__ ( A__ : str , A__ : bool = False ):
'''simple docstring'''
if not isinstance(A__ , A__ ):
__lowerCamelCase = f'Expected string as input, found {type(A__ )}'
raise ValueError(A__ )
if not isinstance(A__ , A__ ):
__lowerCamelCase = f'Expected boolean as use_pascal parameter, found {type(A__ )}'
raise ValueError(A__ )
__lowerCamelCase = input_str.split("""_""" )
__lowerCamelCase = 0 if use_pascal else 1
__lowerCamelCase = words[start_index:]
__lowerCamelCase = [word[0].upper() + word[1:] for word in words_to_capitalize]
__lowerCamelCase = """""" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 29
|
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = [[0 for _ in range(A__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__lowerCamelCase = 1
for n in range(m + 1 ):
for k in range(1 , A__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase_ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29
| 1
|
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False, False, False
@dataclass
class A :
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = True
__UpperCAmelCase : Optional[str] = None
# Automatically constructed
__UpperCAmelCase : ClassVar[str] = "dict"
__UpperCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
__UpperCAmelCase : str = field(default='Audio' , init=UpperCAmelCase_ , repr=UpperCAmelCase_ )
def __call__(self : int ) -> Dict:
"""simple docstring"""
return self.pa_type
def lowercase_ (self : int , __UpperCAmelCase : Union[str, bytes, dict] ) -> dict:
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"bytes": None, "path": value}
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase__ = BytesIO()
sf.write(__UpperCAmelCase , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
UpperCAmelCase__ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
UpperCAmelCase__ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 3_2_7_6_7
UpperCAmelCase__ = BytesIO(bytes() )
sf.write(__UpperCAmelCase , __UpperCAmelCase , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def lowercase_ (self : int , __UpperCAmelCase : dict , __UpperCAmelCase : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase__ , UpperCAmelCase__ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase__ = xsplitext(__UpperCAmelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase__ = token_per_repo_id or {}
UpperCAmelCase__ = path.split("::" )[-1]
try:
UpperCAmelCase__ = string_to_dict(__UpperCAmelCase , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase__ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase__ = None
with xopen(__UpperCAmelCase , "rb" , use_auth_token=__UpperCAmelCase ) as f:
UpperCAmelCase__ , UpperCAmelCase__ = sf.read(__UpperCAmelCase )
else:
UpperCAmelCase__ , UpperCAmelCase__ = sf.read(__UpperCAmelCase )
UpperCAmelCase__ = array.T
if self.mono:
UpperCAmelCase__ = librosa.to_mono(__UpperCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase__ = librosa.resample(__UpperCAmelCase , orig_sr=__UpperCAmelCase , target_sr=self.sampling_rate )
UpperCAmelCase__ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowercase_ (self : Optional[int] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowercase_ (self : List[Any] , __UpperCAmelCase : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
UpperCAmelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
UpperCAmelCase__ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase__ = pa.array([Audio().encode_example(__UpperCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase__ = storage.field("bytes" )
else:
UpperCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase__ = storage.field("path" )
else:
UpperCAmelCase__ = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
UpperCAmelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
def lowercase_ (self : Dict , __UpperCAmelCase : pa.StructArray ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(__UpperCAmelCase : Any ):
with xopen(__UpperCAmelCase , "rb" ) as f:
UpperCAmelCase__ = f.read()
return bytes_
UpperCAmelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase__ = pa.array(
[os.path.basename(__UpperCAmelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
| 65
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62
| 0
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __lowerCamelCase ( snake_case__ ,snake_case__="shi-labs/oneformer_demo" ) -> Union[str, Any]:
"""simple docstring"""
with open(hf_hub_download(snake_case__ ,snake_case__ ,repo_type="""dataset""" ) ,"""r""" ) as f:
_SCREAMING_SNAKE_CASE = json.load(snake_case__ )
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for key, info in class_info.items():
_SCREAMING_SNAKE_CASE = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(snake_case__ ) )
_SCREAMING_SNAKE_CASE = thing_ids
_SCREAMING_SNAKE_CASE = class_names
return metadata
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: List[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any]=7 , UpperCAmelCase_: Union[str, Any]=3 , UpperCAmelCase_: Optional[int]=30 , UpperCAmelCase_: List[str]=400 , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_: int=[0.5, 0.5, 0.5] , UpperCAmelCase_: List[str]=10 , UpperCAmelCase_: Optional[int]=False , UpperCAmelCase_: Optional[int]=255 , UpperCAmelCase_: Tuple="shi-labs/oneformer_demo" , UpperCAmelCase_: Union[str, Any]="ade20k_panoptic.json" , UpperCAmelCase_: Union[str, Any]=10 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = min_resolution
_SCREAMING_SNAKE_CASE = max_resolution
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = image_mean
_SCREAMING_SNAKE_CASE = image_std
_SCREAMING_SNAKE_CASE = class_info_file
_SCREAMING_SNAKE_CASE = prepare_metadata(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = num_text
_SCREAMING_SNAKE_CASE = repo_path
# for the post_process_functions
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 10
_SCREAMING_SNAKE_CASE = 10
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = do_reduce_labels
_SCREAMING_SNAKE_CASE = ignore_index
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase ( self: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: List[str]=False ):
'''simple docstring'''
if not batched:
_SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(UpperCAmelCase_ , Image.Image ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.size
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * h / w )
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
elif w > h:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * w / h )
else:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
else:
_SCREAMING_SNAKE_CASE = []
for image in image_inputs:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[0] )[0]
_SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Union[str, Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__snake_case : int = image_processing_class
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_std""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """ignore_index""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """class_info_file""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """num_text""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """repo_path""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """metadata""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_reduce_labels""" ) )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processing_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Tuple=False , UpperCAmelCase_: Any=False , UpperCAmelCase_: str="np" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_SCREAMING_SNAKE_CASE = self.image_processing_tester.num_labels
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase_ )
if with_segmentation_maps:
_SCREAMING_SNAKE_CASE = num_labels
if is_instance_map:
_SCREAMING_SNAKE_CASE = list(range(UpperCAmelCase_ ) ) * 2
_SCREAMING_SNAKE_CASE = dict(enumerate(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_SCREAMING_SNAKE_CASE = [Image.fromarray(UpperCAmelCase_ ) for annotation in annotations]
_SCREAMING_SNAKE_CASE = image_processor(
UpperCAmelCase_ , ["""semantic"""] * len(UpperCAmelCase_ ) , UpperCAmelCase_ , return_tensors="""pt""" , instance_id_to_semantic_id=UpperCAmelCase_ , pad_and_return_pixel_mask=UpperCAmelCase_ , )
return inputs
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Any ):
'''simple docstring'''
def common(UpperCAmelCase_: List[str]=False , UpperCAmelCase_: Optional[int]=None ):
_SCREAMING_SNAKE_CASE = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCAmelCase_ , is_instance_map=UpperCAmelCase_ , segmentation_type=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = inputs["""mask_labels"""]
_SCREAMING_SNAKE_CASE = inputs["""class_labels"""]
_SCREAMING_SNAKE_CASE = inputs["""pixel_values"""]
_SCREAMING_SNAKE_CASE = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCAmelCase_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCAmelCase_ )
common(is_instance_map=UpperCAmelCase_ , segmentation_type="""pil""" )
common(is_instance_map=UpperCAmelCase_ , segmentation_type="""pil""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.zeros((20, 50) )
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = binary_mask_to_rle(UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs()
_SCREAMING_SNAKE_CASE = fature_extractor.post_process_semantic_segmentation(UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_SCREAMING_SNAKE_CASE = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
_SCREAMING_SNAKE_CASE = fature_extractor.post_process_semantic_segmentation(UpperCAmelCase_ , target_sizes=UpperCAmelCase_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs()
_SCREAMING_SNAKE_CASE = image_processor.post_process_instance_segmentation(UpperCAmelCase_ , threshold=0 )
self.assertTrue(len(UpperCAmelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , UpperCAmelCase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
_SCREAMING_SNAKE_CASE = self.image_processing_tester.get_fake_oneformer_outputs()
_SCREAMING_SNAKE_CASE = image_processor.post_process_panoptic_segmentation(UpperCAmelCase_ , threshold=0 )
self.assertTrue(len(UpperCAmelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , UpperCAmelCase_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 368
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
UpperCamelCase = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
UpperCamelCase = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
UpperCamelCase = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
UpperCamelCase = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
UpperCamelCase = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
UpperCamelCase = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def __lowerCamelCase ( snake_case__ ) -> Optional[Any]:
"""simple docstring"""
if isinstance(snake_case__ ,snake_case__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=False ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.in_layers.0.weight']
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.in_layers.0.bias']
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.in_layers.2.weight']
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.in_layers.2.bias']
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.emb_layers.1.weight']
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.emb_layers.1.bias']
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.out_layers.0.weight']
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.out_layers.0.bias']
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.out_layers.3.weight']
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.out_layers.3.bias']
if has_skip:
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.skip_connection.weight']
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.skip_connection.bias']
return new_checkpoint
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 ,dim=0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 ,dim=0 )
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.norm.weight']
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.norm.bias']
_SCREAMING_SNAKE_CASE = weight_q.squeeze(-1 ).squeeze(-1 )
_SCREAMING_SNAKE_CASE = bias_q.squeeze(-1 ).squeeze(-1 )
_SCREAMING_SNAKE_CASE = weight_k.squeeze(-1 ).squeeze(-1 )
_SCREAMING_SNAKE_CASE = bias_k.squeeze(-1 ).squeeze(-1 )
_SCREAMING_SNAKE_CASE = weight_v.squeeze(-1 ).squeeze(-1 )
_SCREAMING_SNAKE_CASE = bias_v.squeeze(-1 ).squeeze(-1 )
_SCREAMING_SNAKE_CASE = (
checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
)
_SCREAMING_SNAKE_CASE = checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.load(snake_case__ ,map_location="""cpu""" )
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = checkpoint["""time_embed.0.weight"""]
_SCREAMING_SNAKE_CASE = checkpoint["""time_embed.0.bias"""]
_SCREAMING_SNAKE_CASE = checkpoint["""time_embed.2.weight"""]
_SCREAMING_SNAKE_CASE = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
_SCREAMING_SNAKE_CASE = checkpoint["""label_emb.weight"""]
_SCREAMING_SNAKE_CASE = checkpoint["""input_blocks.0.0.weight"""]
_SCREAMING_SNAKE_CASE = checkpoint["""input_blocks.0.0.bias"""]
_SCREAMING_SNAKE_CASE = unet_config["""down_block_types"""]
_SCREAMING_SNAKE_CASE = unet_config["""layers_per_block"""]
_SCREAMING_SNAKE_CASE = unet_config["""attention_head_dim"""]
_SCREAMING_SNAKE_CASE = unet_config["""block_out_channels"""]
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = channels_list[0]
for i, layer_type in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE = channels_list[i]
_SCREAMING_SNAKE_CASE = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(snake_case__ ):
_SCREAMING_SNAKE_CASE = F'down_blocks.{i}.resnets.{j}'
_SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.0'
_SCREAMING_SNAKE_CASE = True if j == 0 and downsample_block_has_skip else False
_SCREAMING_SNAKE_CASE = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,has_skip=snake_case__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(snake_case__ ):
_SCREAMING_SNAKE_CASE = F'down_blocks.{i}.resnets.{j}'
_SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.0'
_SCREAMING_SNAKE_CASE = True if j == 0 and downsample_block_has_skip else False
_SCREAMING_SNAKE_CASE = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,has_skip=snake_case__ )
_SCREAMING_SNAKE_CASE = F'down_blocks.{i}.attentions.{j}'
_SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.1'
_SCREAMING_SNAKE_CASE = convert_attention(
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
current_layer += 1
if i != len(snake_case__ ) - 1:
_SCREAMING_SNAKE_CASE = F'down_blocks.{i}.downsamplers.0'
_SCREAMING_SNAKE_CASE = F'input_blocks.{current_layer}.0'
_SCREAMING_SNAKE_CASE = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
current_layer += 1
_SCREAMING_SNAKE_CASE = current_channels
# hardcoded the mid-block for now
_SCREAMING_SNAKE_CASE = """mid_block.resnets.0"""
_SCREAMING_SNAKE_CASE = """middle_block.0"""
_SCREAMING_SNAKE_CASE = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = """mid_block.attentions.0"""
_SCREAMING_SNAKE_CASE = """middle_block.1"""
_SCREAMING_SNAKE_CASE = convert_attention(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = """mid_block.resnets.1"""
_SCREAMING_SNAKE_CASE = """middle_block.2"""
_SCREAMING_SNAKE_CASE = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = unet_config["""up_block_types"""]
for i, layer_type in enumerate(snake_case__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_SCREAMING_SNAKE_CASE = F'up_blocks.{i}.resnets.{j}'
_SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer}.0'
_SCREAMING_SNAKE_CASE = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,has_skip=snake_case__ )
current_layer += 1
if i != len(snake_case__ ) - 1:
_SCREAMING_SNAKE_CASE = F'up_blocks.{i}.upsamplers.0'
_SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer-1}.1'
_SCREAMING_SNAKE_CASE = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_SCREAMING_SNAKE_CASE = F'up_blocks.{i}.resnets.{j}'
_SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer}.0'
_SCREAMING_SNAKE_CASE = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,has_skip=snake_case__ )
_SCREAMING_SNAKE_CASE = F'up_blocks.{i}.attentions.{j}'
_SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer}.1'
_SCREAMING_SNAKE_CASE = convert_attention(
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
current_layer += 1
if i != len(snake_case__ ) - 1:
_SCREAMING_SNAKE_CASE = F'up_blocks.{i}.upsamplers.0'
_SCREAMING_SNAKE_CASE = F'output_blocks.{current_layer-1}.2'
_SCREAMING_SNAKE_CASE = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = checkpoint["""out.0.weight"""]
_SCREAMING_SNAKE_CASE = checkpoint["""out.0.bias"""]
_SCREAMING_SNAKE_CASE = checkpoint["""out.2.weight"""]
_SCREAMING_SNAKE_CASE = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
UpperCamelCase = parser.parse_args()
UpperCamelCase = strabool(args.class_cond)
UpperCamelCase = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
UpperCamelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCamelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
UpperCamelCase = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
UpperCamelCase = None
UpperCamelCase = con_pt_to_diffuser(args.unet_path, unet_config)
UpperCamelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
UpperCamelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
UpperCamelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCamelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
UpperCamelCase = CMStochasticIterativeScheduler(**scheduler_config)
UpperCamelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 125
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase__ = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 290
|
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def UpperCamelCase_( snake_case : Callable ):
'''simple docstring'''
@wraps(snake_case )
def _inner_fn(*snake_case : Optional[int] , **snake_case : List[Any] ):
warnings.warn(
(f'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , snake_case , )
return fn(*snake_case , **snake_case )
return _inner_fn
| 85
| 0
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__A : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase ( _UpperCAmelCase ):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def a_ ( self , SCREAMING_SNAKE_CASE_ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : int = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(SCREAMING_SNAKE_CASE_ )}.' )
# get prompt text embeddings
UpperCamelCase : Optional[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
UpperCamelCase : str = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCamelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : Optional[Any] = text_embeddings.shape
UpperCamelCase : Optional[Any] = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
UpperCamelCase : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase : List[str]
if negative_prompt is None:
UpperCamelCase : Any = [""""""]
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='
f' {type(SCREAMING_SNAKE_CASE_ )}.' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
UpperCamelCase : str = negative_prompt
UpperCamelCase : Tuple = text_input_ids.shape[-1]
UpperCamelCase : Dict = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
UpperCamelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : Optional[Any] = uncond_embeddings.shape[1]
UpperCamelCase : str = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
UpperCamelCase : str = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCamelCase : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase : Any = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
UpperCamelCase : Tuple = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
UpperCamelCase : Any = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
UpperCamelCase : Tuple = latents_reference.to(self.device )
UpperCamelCase : Union[str, Any] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCamelCase : str = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCamelCase : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCamelCase : Tuple = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCamelCase : Optional[int] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCamelCase : int = 0 if dx < 0 else dx
UpperCamelCase : Union[str, Any] = 0 if dy < 0 else dy
UpperCamelCase : List[Any] = max(-dx , 0 )
UpperCamelCase : Union[str, Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
UpperCamelCase : List[str] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase : List[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase : Dict = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase : Any = {}
if accepts_eta:
UpperCamelCase : List[Any] = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : Tuple = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase : int = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase : Tuple = noise_pred.chunk(2 )
UpperCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Dict = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = 1 / 0.18215 * latents
UpperCamelCase : Dict = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCamelCase : Any = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors="""pt""" ).to(
self.device )
UpperCamelCase : Optional[int] = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCamelCase : List[str] = None
if output_type == "pil":
UpperCamelCase : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
| 355
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( _UpperCAmelCase , unittest.TestCase ):
lowercase : Any = AudioLDMPipeline
lowercase : Union[str, Any] = TEXT_TO_AUDIO_PARAMS
lowercase : List[str] = TEXT_TO_AUDIO_BATCH_PARAMS
lowercase : Tuple = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
def a_ ( self ):
torch.manual_seed(0 )
UpperCamelCase : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Optional[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase : int = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
UpperCamelCase : Optional[int] = ClapTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
UpperCamelCase : Tuple = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Tuple = SpeechTaHifiGan(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : Any = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def a_ ( self ):
UpperCamelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Any = self.get_dummy_components()
UpperCamelCase : int = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 256
UpperCamelCase : Tuple = audio[:10]
UpperCamelCase : Dict = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : str = self.get_dummy_components()
UpperCamelCase : Tuple = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = 3 * [inputs["""prompt"""]]
# forward
UpperCamelCase : List[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = output.audios[0]
UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
UpperCamelCase : List[str] = audioldm_pipe.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
UpperCamelCase : Optional[int] = text_inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = audioldm_pipe.text_encoder(
SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase : Optional[int] = F.normalize(SCREAMING_SNAKE_CASE_ , dim=-1 )
UpperCamelCase : Tuple = prompt_embeds
# forward
UpperCamelCase : List[str] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : List[str] = self.get_dummy_components()
UpperCamelCase : List[Any] = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = 3 * ["""this is a negative prompt"""]
UpperCamelCase : List[Any] = negative_prompt
UpperCamelCase : str = 3 * [inputs["""prompt"""]]
# forward
UpperCamelCase : str = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
UpperCamelCase : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = 3 * [inputs.pop("""prompt""" )]
UpperCamelCase : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCamelCase : int = audioldm_pipe.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
UpperCamelCase : Union[str, Any] = text_inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = audioldm_pipe.text_encoder(
SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Tuple = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase : Optional[int] = F.normalize(SCREAMING_SNAKE_CASE_ , dim=-1 )
embeds.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase : Tuple = embeds
# forward
UpperCamelCase : List[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Optional[int] = self.get_dummy_components()
UpperCamelCase : List[str] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = """egg cracking"""
UpperCamelCase : List[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 256
UpperCamelCase : Union[str, Any] = audio[:10]
UpperCamelCase : Dict = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Union[str, Any] = self.get_dummy_components()
UpperCamelCase : Tuple = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
UpperCamelCase : List[Any] = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCamelCase : Dict = 2
UpperCamelCase : List[str] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
UpperCamelCase : List[str] = 2
UpperCamelCase : Optional[Any] = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , num_waveforms_per_prompt=SCREAMING_SNAKE_CASE_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
UpperCamelCase : Any = 2
UpperCamelCase : str = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=SCREAMING_SNAKE_CASE_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def a_ ( self ):
UpperCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Tuple = self.get_dummy_components()
UpperCamelCase : Tuple = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = audioldm_pipe.vocoder.config.sampling_rate
UpperCamelCase : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = audioldm_pipe(audio_length_in_s=0.016 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) / vocoder_sampling_rate == 0.016
UpperCamelCase : Optional[Any] = audioldm_pipe(audio_length_in_s=0.032 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) / vocoder_sampling_rate == 0.032
def a_ ( self ):
UpperCamelCase : str = self.get_dummy_components()
UpperCamelCase : Optional[Any] = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = ["""hey"""]
UpperCamelCase : Dict = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=1 )
UpperCamelCase : str = output.audios.shape
assert audio_shape == (1, 256)
UpperCamelCase : Optional[Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCamelCase : str = SpeechTaHifiGan(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=1 )
UpperCamelCase : List[str] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def a_ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
@slow
class lowerCamelCase ( unittest.TestCase ):
def a_ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="cpu" , SCREAMING_SNAKE_CASE_=torch.floataa , SCREAMING_SNAKE_CASE_=0 ):
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = np.random.RandomState(SCREAMING_SNAKE_CASE_ ).standard_normal((1, 8, 128, 16) )
UpperCamelCase : int = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def a_ ( self ):
UpperCamelCase : Optional[int] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
UpperCamelCase : List[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.get_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = 25
UpperCamelCase : Optional[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ ).audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 8_1920
UpperCamelCase : Union[str, Any] = audio[7_7230:7_7240]
UpperCamelCase : Optional[Any] = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCamelCase : Any = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def a_ ( self ):
UpperCamelCase : Any = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
UpperCamelCase : Any = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCamelCase : str = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = self.get_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ ).audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 8_1920
UpperCamelCase : Union[str, Any] = audio[2_7780:2_7790]
UpperCamelCase : Tuple = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCamelCase : Tuple = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 27
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 97
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 97
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase : str = logging.get_logger(__name__)
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 160
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 160
| 1
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowerCamelCase__ ( _lowerCamelCase : int ) -> int:
lowerCamelCase_ = prime_factors(_lowerCamelCase )
if is_square_free(_lowerCamelCase ):
return -1 if len(_lowerCamelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 183
|
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE : Optional[Any] = TypeVar('''T''')
class a ( Generic[T] ):
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None:
lowerCamelCase_ = data
lowerCamelCase_ = self
lowerCamelCase_ = 0
class a ( Generic[T] ):
def __init__( self : Any ) -> None:
# map from node name to the node object
lowerCamelCase_ = {}
def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None:
# create a new set with x as its member
lowerCamelCase_ = DisjointSetTreeNode(__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
lowerCamelCase_ = self.map[data]
if elem_ref != elem_ref.parent:
lowerCamelCase_ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : DisjointSetTreeNode[T] , __SCREAMING_SNAKE_CASE : DisjointSetTreeNode[T] ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
lowerCamelCase_ = nodea
else:
lowerCamelCase_ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(__SCREAMING_SNAKE_CASE ) , self.find_set(__SCREAMING_SNAKE_CASE ) )
class a ( Generic[T] ):
def __init__( self : Optional[int] ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
lowerCamelCase_ = {}
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
lowerCamelCase_ = {}
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
# add an edge with the given weight
self.add_node(__SCREAMING_SNAKE_CASE )
self.add_node(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def UpperCamelCase ( self : List[Any] ) -> GraphUndirectedWeighted[T]:
lowerCamelCase_ = []
lowerCamelCase_ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda __SCREAMING_SNAKE_CASE : x[2] )
# creating the disjoint set
lowerCamelCase_ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(__SCREAMING_SNAKE_CASE )
# MST generation
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = edges[index]
index += 1
lowerCamelCase_ = disjoint_set.find_set(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = disjoint_set.find_set(__SCREAMING_SNAKE_CASE )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
disjoint_set.union(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return graph
| 183
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_SCREAMING_SNAKE_CASE : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 352
|
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = len(UpperCamelCase_ )
snake_case = [[0] * n for i in range(UpperCamelCase_ )]
for i in range(UpperCamelCase_ ):
snake_case = y_points[i]
for i in range(2 ,UpperCamelCase_ ):
for j in range(UpperCamelCase_ ,UpperCamelCase_ ):
snake_case = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 213
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Any = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 6
|
from __future__ import annotations
def __lowercase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] ): # noqa: E741
while r - l > 1:
UpperCamelCase_ : Union[str, Any] = (l + r) // 2
if v[m] >= key:
UpperCamelCase_ : str = m
else:
UpperCamelCase_ : List[Any] = m # noqa: E741
return r
def __lowercase ( lowerCamelCase : list[int] ):
if len(lowerCamelCase ) == 0:
return 0
UpperCamelCase_ : Tuple = [0] * len(lowerCamelCase )
UpperCamelCase_ : int = 1
UpperCamelCase_ : Dict = v[0]
for i in range(1 , len(lowerCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase_ : Any = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase_ : Dict = v[i]
length += 1
else:
UpperCamelCase_ : List[str] = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175
| 0
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : list[tuple[float, float]]):
_A : List[str] = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_A : Optional[int] = len(SCREAMING_SNAKE_CASE) - 1
def A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : float):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_A : list[float] = []
for i in range(len(self.list_of_points)):
# basis function for each i
output_values.append(
comb(self.degree , SCREAMING_SNAKE_CASE) * ((1 - t) ** (self.degree - i)) * (t**i))
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(SCREAMING_SNAKE_CASE) , 5) == 1
return output_values
def A ( self : str , SCREAMING_SNAKE_CASE : float):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_A : Tuple = self.basis_function(SCREAMING_SNAKE_CASE)
_A : List[str] = 0.0
_A : List[str] = 0.0
for i in range(len(self.list_of_points)):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def A ( self : Any , SCREAMING_SNAKE_CASE : float = 0.01):
from matplotlib import pyplot as plt # type: ignore
_A : list[float] = [] # x coordinates of points to plot
_A : list[float] = [] # y coordinates of points to plot
_A : Optional[int] = 0.0
while t <= 1:
_A : Union[str, Any] = self.bezier_curve_function(SCREAMING_SNAKE_CASE)
to_plot_x.append(value[0])
to_plot_y.append(value[1])
t += step_size
_A : List[str] = [i[0] for i in self.list_of_points]
_A : Union[str, Any] = [i[1] for i in self.list_of_points]
plt.plot(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , color='blue' , label='Curve of Degree ' + str(self.degree) , )
plt.scatter(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , color='red' , label='Control Points')
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 365
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : str=0.999 ,lowerCamelCase : int="cosine" ,):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase : Tuple ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase : List[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
_A : Tuple = []
for i in range(lowerCamelCase ):
_A : List[Any] = i / num_diffusion_timesteps
_A : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase ) / alpha_bar_fn(lowerCamelCase ) ,lowerCamelCase ) )
return torch.tensor(lowerCamelCase ,dtype=torch.floataa )
class __lowerCamelCase ( a_ , a_ ):
"""simple docstring"""
a = [e.name for e in KarrasDiffusionSchedulers]
a = 2
@register_to_config
def __init__( self : int , SCREAMING_SNAKE_CASE : int = 1000 , SCREAMING_SNAKE_CASE : float = 0.0_0085 , SCREAMING_SNAKE_CASE : float = 0.012 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[Union[np.ndarray, List[float]]] = None , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : str = "linspace" , SCREAMING_SNAKE_CASE : int = 0 , ):
if trained_betas is not None:
_A : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa)
elif beta_schedule == "linear":
_A : List[Any] = torch.linspace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_A : Any = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_A : Optional[Any] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE)
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}')
_A : Any = 1.0 - self.betas
_A : List[Any] = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def A ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any=None):
if schedule_timesteps is None:
_A : Dict = self.timesteps
_A : List[Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
_A : Dict = 1 if len(SCREAMING_SNAKE_CASE) > 1 else 0
else:
_A : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep
_A : int = self._index_counter[timestep_int]
return indices[pos].item()
@property
def A ( self : Optional[Any]):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def A ( self : List[Any] , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , ):
_A : Tuple = self.index_for_timestep(SCREAMING_SNAKE_CASE)
if self.state_in_first_order:
_A : Any = self.sigmas[step_index]
else:
_A : int = self.sigmas_interpol[step_index]
_A : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def A ( self : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, torch.device] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , ):
_A : Optional[Any] = num_inference_steps
_A : int = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_A : Tuple = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE)[::-1].copy()
elif self.config.timestep_spacing == "leading":
_A : Optional[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_A : int = (np.arange(0 , SCREAMING_SNAKE_CASE) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_A : List[str] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_A : str = (np.arange(SCREAMING_SNAKE_CASE , 0 , -step_ratio)).round().copy().astype(SCREAMING_SNAKE_CASE)
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.')
_A : List[str] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
_A : Optional[int] = torch.from_numpy(np.log(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE)
_A : str = np.interp(SCREAMING_SNAKE_CASE , np.arange(0 , len(SCREAMING_SNAKE_CASE)) , SCREAMING_SNAKE_CASE)
_A : str = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
_A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(device=SCREAMING_SNAKE_CASE)
# interpolate sigmas
_A : Optional[int] = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp()
_A : Any = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
_A : List[Any] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
if str(SCREAMING_SNAKE_CASE).startswith('mps'):
# mps does not support float64
_A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=torch.floataa)
else:
_A : Dict = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
# interpolate timesteps
_A : Optional[int] = self.sigma_to_t(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=timesteps.dtype)
_A : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten()
_A : Optional[Any] = torch.cat([timesteps[:1], interleaved_timesteps])
_A : str = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_A : Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE)
def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]):
# get log sigma
_A : Dict = sigma.log()
# get distribution
_A : Any = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_A : Tuple = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
_A : Union[str, Any] = low_idx + 1
_A : Dict = self.log_sigmas[low_idx]
_A : List[Any] = self.log_sigmas[high_idx]
# interpolate sigmas
_A : Dict = (low - log_sigma) / (low - high)
_A : Union[str, Any] = w.clamp(0 , 1)
# transform interpolation to time range
_A : int = (1 - w) * low_idx + w * high_idx
_A : Any = t.view(sigma.shape)
return t
@property
def A ( self : Any):
return self.sample is None
def A ( self : int , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : bool = True , ):
_A : Optional[int] = self.index_for_timestep(SCREAMING_SNAKE_CASE)
# advance index counter by 1
_A : Dict = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_A : Tuple = self.sigmas[step_index]
_A : Dict = self.sigmas_interpol[step_index + 1]
_A : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_A : int = self.sigmas[step_index - 1]
_A : Union[str, Any] = self.sigmas_interpol[step_index]
_A : Dict = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_A : List[Any] = 0
_A : Dict = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_A : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
_A : Tuple = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_A : Any = sigma_hat if self.state_in_first_order else sigma_interpol
_A : Union[str, Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample')
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`')
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_A : int = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_A : str = sigma_interpol - sigma_hat
# store for 2nd order step
_A : List[str] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_A : List[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_A : Optional[int] = sigma_next - sigma_hat
_A : str = self.sample
_A : Any = None
_A : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE)
def A ( self : Any , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_A : str = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE):
# mps does not support float64
_A : Any = self.timesteps.to(original_samples.device , dtype=torch.floataa)
_A : List[str] = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
_A : str = self.timesteps.to(original_samples.device)
_A : str = timesteps.to(original_samples.device)
_A : int = [self.index_for_timestep(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) for t in timesteps]
_A : Tuple = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
_A : List[Any] = sigma.unsqueeze(-1)
_A : Dict = original_samples + noise * sigma
return noisy_samples
def __len__( self : List[Any]):
return self.config.num_train_timesteps
| 227
| 0
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A :
def __init__( self, UpperCamelCase__, UpperCamelCase__=2, UpperCamelCase__=3, UpperCamelCase__=4, UpperCamelCase__=2, UpperCamelCase__=7, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=99, UpperCamelCase__=36, UpperCamelCase__=3, UpperCamelCase__=4, UpperCamelCase__=37, UpperCamelCase__="gelu", UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=512, UpperCamelCase__=16, UpperCamelCase__=2, UpperCamelCase__=0.02, UpperCamelCase__=6, UpperCamelCase__=6, UpperCamelCase__=3, UpperCamelCase__=4, UpperCamelCase__=None, UpperCamelCase__=1000, ):
"""simple docstring"""
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = text_seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = coordinate_size
lowerCAmelCase_ = shape_size
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = scope
lowerCAmelCase_ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCAmelCase_ = text_seq_length
lowerCAmelCase_ = (image_size // patch_size) ** 2 + 1
lowerCAmelCase_ = self.text_seq_length + self.image_seq_length
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase_ = bbox[i, j, 3]
lowerCAmelCase_ = bbox[i, j, 1]
lowerCAmelCase_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase_ = bbox[i, j, 2]
lowerCAmelCase_ = bbox[i, j, 0]
lowerCAmelCase_ = t
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
lowerCAmelCase_ = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = LayoutLMvaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# text + image
lowerCAmelCase_ = model(UpperCamelCase__, pixel_values=UpperCamelCase__ )
lowerCAmelCase_ = model(
UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__ )
lowerCAmelCase_ = model(UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__, token_type_ids=UpperCamelCase__ )
lowerCAmelCase_ = model(UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCAmelCase_ = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCAmelCase_ = model(pixel_values=UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = LayoutLMvaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(
UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__, labels=UpperCamelCase__, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = LayoutLMvaForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(
UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__, labels=UpperCamelCase__, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = LayoutLMvaForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(
UpperCamelCase__, bbox=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__, token_type_ids=UpperCamelCase__, start_positions=UpperCamelCase__, end_positions=UpperCamelCase__, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) ,
) = config_and_inputs
lowerCAmelCase_ = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__snake_case = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
return True
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = LayoutLMvaModelTester(self )
lowerCAmelCase_ = ConfigTester(self, config_class=UpperCamelCase__, hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__=False ):
"""simple docstring"""
lowerCAmelCase_ = copy.deepcopy(UpperCamelCase__ )
if model_class in get_values(UpperCamelCase__ ):
lowerCAmelCase_ = {
k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous()
if isinstance(UpperCamelCase__, torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
lowerCAmelCase_ = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=UpperCamelCase__ )
elif model_class in get_values(UpperCamelCase__ ):
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=UpperCamelCase__ )
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
lowerCAmelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=UpperCamelCase__, )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = LayoutLMvaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __UpperCamelCase ( ):
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class A ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(UpperCamelCase__ )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=UpperCamelCase__, return_tensors='''pt''' ).pixel_values.to(UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([[1, 2]] )
lowerCAmelCase_ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowerCAmelCase_ = model(
input_ids=input_ids.to(UpperCamelCase__ ), bbox=bbox.to(UpperCamelCase__ ), pixel_values=pixel_values.to(UpperCamelCase__ ), )
# verify the logits
lowerCAmelCase_ = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1E-4 ) )
| 278
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
_A = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
_A = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
_A = dict(zip(vocab, range(len(vocab))))
_A = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
_A = Path(tmpdirname)
_A = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
_A = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
_A = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
_A = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
_A = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
_A = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
_A = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
_A = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 278
| 1
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _lowerCamelCase( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(a ):
requests.request("GET" , "https://huggingface.co" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("GET" , "https://huggingface.co" , timeout=1.0 )
@pytest.mark.integration
def _lowerCamelCase( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("GET" , "https://huggingface.co" )
def _lowerCamelCase( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(a ):
http_head("https://huggingface.co" )
| 353
|
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a , a , a , ):
__a = len(a )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(a ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , a , a , )
def _lowerCamelCase( a ):
__a = []
depth_first_search([] , [] , [] , a , a )
# Print all the boards
for board in boards:
for column in board:
print(a )
print("" )
print(len(a ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 268
| 0
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
if padding is not None:
UpperCAmelCase_ : List[str] = padding
if truncation is not None:
UpperCAmelCase_ : Tuple = truncation
if top_k is not None:
UpperCAmelCase_ : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
else:
UpperCAmelCase_ : List[str] = image
UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
UpperCAmelCase_ : Dict = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : Optional[Any] = scores.tolist()
UpperCAmelCase_ : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29
|
def lowercase__ ( __snake_case : list ):
'''simple docstring'''
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
UpperCAmelCase_ : Dict = False
for j in range(__snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j]
UpperCAmelCase_ : int = True
for j in range(__snake_case ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j]
UpperCAmelCase_ : Any = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29
| 1
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCamelCase = 16
_UpperCamelCase = 32
def lowercase_ ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : int = 16 ):
"""simple docstring"""
__UpperCAmelCase : int = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__UpperCAmelCase : Any = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowerCAmelCase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
__UpperCAmelCase : Tuple = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCAmelCase : Tuple = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCAmelCase : Optional[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCAmelCase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCAmelCase : List[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCAmelCase : Optional[int] = 16
elif accelerator.mixed_precision != "no":
__UpperCAmelCase : Optional[int] = 8
else:
__UpperCAmelCase : List[Any] = None
return tokenizer.pad(
lowerCAmelCase__ , padding="""longest""" , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
__UpperCAmelCase : Dict = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
__UpperCAmelCase : int = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_UpperCamelCase = mocked_dataloaders # noqa: F811
def lowercase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCAmelCase__ ) == "1":
__UpperCAmelCase : Optional[int] = 2
# Initialize accelerator
__UpperCAmelCase : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCAmelCase : Tuple = config["""lr"""]
__UpperCAmelCase : Optional[Any] = int(config["""num_epochs"""] )
__UpperCAmelCase : Optional[Any] = int(config["""seed"""] )
__UpperCAmelCase : List[Any] = int(config["""batch_size"""] )
__UpperCAmelCase : List[Any] = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowerCAmelCase__ )
def inner_training_loop(lowerCAmelCase__ : Dict ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowerCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCAmelCase : Dict = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCAmelCase : List[str] = model.to(accelerator.device )
# Instantiate optimizer
__UpperCAmelCase : Union[str, Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )
__UpperCAmelCase , __UpperCAmelCase : List[str] = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ )
# Instantiate scheduler
__UpperCAmelCase : Optional[int] = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Now we train the model
for epoch in range(lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__UpperCAmelCase : List[str] = model(**lowerCAmelCase__ )
__UpperCAmelCase : Optional[Any] = outputs.loss
accelerator.backward(lowerCAmelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCAmelCase : Dict = model(**lowerCAmelCase__ )
__UpperCAmelCase : Optional[int] = outputs.logits.argmax(dim=-1 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
__UpperCAmelCase : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , lowerCAmelCase__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowercase_ ( ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
__UpperCAmelCase : Tuple = parser.parse_args()
__UpperCAmelCase : Optional[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 16
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _A :
@staticmethod
def __A ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class _A ( unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
__UpperCAmelCase : Optional[int] = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = object_detector(examples[0] , threshold=0.0 )
__UpperCAmelCase : Tuple = len(__UpperCAmelCase )
self.assertGreater(__UpperCAmelCase , 0 )
self.assertEqual(
__UpperCAmelCase , [
{
"""score""": ANY(__UpperCAmelCase ),
"""label""": ANY(__UpperCAmelCase ),
"""box""": {"""xmin""": ANY(__UpperCAmelCase ), """ymin""": ANY(__UpperCAmelCase ), """xmax""": ANY(__UpperCAmelCase ), """ymax""": ANY(__UpperCAmelCase )},
}
for i in range(__UpperCAmelCase )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def __A ( self ) -> Tuple:
'''simple docstring'''
pass
@require_torch
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
__UpperCAmelCase : Optional[int] = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
__UpperCAmelCase : str = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = pipeline("""zero-shot-object-detection""" )
__UpperCAmelCase : List[Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
__UpperCAmelCase : Any = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
@require_torch
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = 0.2
__UpperCAmelCase : List[Any] = pipeline("""zero-shot-object-detection""" )
__UpperCAmelCase : Optional[int] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=__UpperCAmelCase , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 2
__UpperCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" )
__UpperCAmelCase : List[Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=__UpperCAmelCase , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
| 16
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''dpr'''
def __init__( self: Optional[Any] , UpperCamelCase_: List[Any]=30_522 , UpperCamelCase_: Optional[Any]=768 , UpperCamelCase_: Dict=12 , UpperCamelCase_: str=12 , UpperCamelCase_: Dict=3_072 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Dict=512 , UpperCamelCase_: int=2 , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: Optional[int]=1E-1_2 , UpperCamelCase_: str=0 , UpperCamelCase_: Optional[Any]="absolute" , UpperCamelCase_: int = 0 , **UpperCamelCase_: Optional[Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = projection_dim
lowercase__ = position_embedding_type
| 110
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCAmelCase = logging.getLogger(__name__)
class _a ( UpperCamelCase__ ):
_lowercase : Union[str, Any] = '''token-classification'''
def __init__( self: int , UpperCamelCase_: Optional[Any] ) -> Dict:
"""simple docstring"""
if type(UpperCamelCase_ ) == dict:
lowercase__ = Namespace(**UpperCamelCase_ )
lowercase__ = import_module('''tasks''' )
try:
lowercase__ = getattr(UpperCamelCase_ , hparams.task_type )
lowercase__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
lowercase__ = self.token_classification_task.get_labels(hparams.labels )
lowercase__ = CrossEntropyLoss().ignore_index
super().__init__(UpperCamelCase_ , len(self.labels ) , self.mode )
def lowerCamelCase_ ( self: Tuple , **UpperCamelCase_: Optional[int] ) -> str:
"""simple docstring"""
return self.model(**UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: List[Any] , UpperCamelCase_: int ) -> int:
"""simple docstring"""
lowercase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
lowercase__ = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ = self(**UpperCamelCase_ )
lowercase__ = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ = self._feature_file(UpperCamelCase_ )
if os.path.exists(UpperCamelCase_ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , UpperCamelCase_ )
lowercase__ = torch.load(UpperCamelCase_ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
lowercase__ = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCamelCase_ )
lowercase__ = self.token_classification_task.convert_examples_to_features(
UpperCamelCase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCamelCase_ , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , UpperCamelCase_ )
torch.save(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: bool = False ) -> DataLoader:
"""simple docstring"""
lowercase__ = self._feature_file(UpperCamelCase_ )
logger.info('''Loading features from cached file %s''' , UpperCamelCase_ )
lowercase__ = torch.load(UpperCamelCase_ )
lowercase__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowercase__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , batch_size=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: int , UpperCamelCase_: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
"""Compute validation""" ""
lowercase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
lowercase__ = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ = self(**UpperCamelCase_ )
lowercase__ , lowercase__ = outputs[:2]
lowercase__ = logits.detach().cpu().numpy()
lowercase__ = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: List[str] ) -> int:
"""simple docstring"""
lowercase__ = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
lowercase__ = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
lowercase__ = np.argmax(UpperCamelCase_ , axis=2 )
lowercase__ = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
lowercase__ = dict(enumerate(self.labels ) )
lowercase__ = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(UpperCamelCase_ , UpperCamelCase_ ),
'''precision''': precision_score(UpperCamelCase_ , UpperCamelCase_ ),
'''recall''': recall_score(UpperCamelCase_ , UpperCamelCase_ ),
'''f1''': fa_score(UpperCamelCase_ , UpperCamelCase_ ),
}
lowercase__ = dict(results.items() )
lowercase__ = results
return ret, preds_list, out_label_list
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ = self._eval_end(UpperCamelCase_ )
lowercase__ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Dict ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ = self._eval_end(UpperCamelCase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Optional[Any] , UpperCamelCase_: str ) -> Optional[Any]:
"""simple docstring"""
BaseTransformer.add_model_specific_args(UpperCamelCase_ , UpperCamelCase_ )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=UpperCamelCase_ , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=UpperCamelCase_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=UpperCamelCase_ , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=UpperCamelCase_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
lowerCAmelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = NERTransformer(args)
lowerCAmelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 110
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Optional[int] = KandinskyVaaControlnetPipeline
_UpperCamelCase : Union[str, Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
_UpperCamelCase : Any = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
_UpperCamelCase : Optional[int] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_UpperCamelCase : Optional[int] = False
@property
def __a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return 32
@property
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
return 32
@property
def __a ( self : Tuple ) -> Dict:
"""simple docstring"""
return self.time_input_dim
@property
def __a ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __a ( self : Tuple ) -> str:
"""simple docstring"""
return 100
@property
def __a ( self : List[Any] ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase : Tuple = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase : Dict = UNetaDConditionModel(**_A )
return model
@property
def __a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __a ( self : List[Any] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowercase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def __a ( self : Tuple ) -> str:
"""simple docstring"""
lowercase : List[str] = self.dummy_unet
lowercase : Optional[Any] = self.dummy_movq
lowercase : int = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=_A , set_alpha_to_one=_A , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_A , )
lowercase : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __a ( self : Any , _A : Union[str, Any] , _A : Optional[int]=0 ) -> List[str]:
"""simple docstring"""
lowercase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) ).to(_A )
lowercase : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_A )
# create hint
lowercase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('''mps''' ):
lowercase : str = torch.manual_seed(_A )
else:
lowercase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
lowercase : Optional[int] = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __a ( self : List[str] ) -> Dict:
"""simple docstring"""
lowercase : Dict = '''cpu'''
lowercase : List[str] = self.get_dummy_components()
lowercase : Any = self.pipeline_class(**_A )
lowercase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowercase : str = pipe(**self.get_dummy_inputs(_A ) )
lowercase : Tuple = output.images
lowercase : Dict = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
lowercase : Optional[int] = image[0, -3:, -3:, -1]
lowercase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : Any = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def __a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int ) -> List[str]:
"""simple docstring"""
lowercase : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
lowercase : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
lowercase : Union[str, Any] = torch.from_numpy(np.array(_A ) ).float() / 255.0
lowercase : Optional[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowercase : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
lowercase : Optional[int] = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
lowercase : Tuple = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
lowercase : Optional[int] = '''A robot, 4k photo'''
lowercase : Tuple = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowercase : Tuple = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowercase : int = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowercase : List[str] = pipeline(
image_embeds=_A , negative_image_embeds=_A , hint=_A , generator=_A , num_inference_steps=100 , output_type='''np''' , )
lowercase : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_A , _A )
| 371
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
lowerCAmelCase_ = Path(__file__).parent / 'model_card_template.md'
lowerCAmelCase_ = uuida().hex
lowerCAmelCase_ = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def snake_case( __magic_name__ = None ) -> str:
'''simple docstring'''
lowercase : List[Any] = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__magic_name__ , __magic_name__ ):
ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__magic_name__ , __magic_name__ ):
ua += "; " + user_agent
return ua
def snake_case( __magic_name__ , __magic_name__ = None , __magic_name__ = None ) -> Optional[Any]:
'''simple docstring'''
if token is None:
lowercase : int = HfFolder.get_token()
if organization is None:
lowercase : List[str] = whoami(__magic_name__ )['''name''']
return F"""{username}/{model_id}"""
else:
return F"""{organization}/{model_id}"""
def snake_case( __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(__magic_name__ , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
lowercase : Optional[Any] = args.hub_token if hasattr(__magic_name__ , '''hub_token''' ) else None
lowercase : int = get_full_repo_name(__magic_name__ , token=__magic_name__ )
lowercase : Dict = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__magic_name__ , model_name=__magic_name__ , repo_name=__magic_name__ , dataset_name=args.dataset_name if hasattr(__magic_name__ , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__magic_name__ , '''gradient_accumulation_steps''' ) else None
) , adam_betaa=args.adam_betaa if hasattr(__magic_name__ , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(__magic_name__ , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__magic_name__ , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__magic_name__ , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__magic_name__ , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__magic_name__ , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__magic_name__ , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(__magic_name__ , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__magic_name__ , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
lowercase : Any = os.path.join(args.output_dir , '''README.md''' )
model_card.save(__magic_name__ )
def snake_case( __magic_name__ , __magic_name__ = None ) -> int:
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
lowercase : Dict = str(Path(__magic_name__ ).as_posix() )
lowercase : Any = re.search(r'''snapshots/([^/]+)/''' , __magic_name__ )
if search is None:
return None
lowercase : List[Any] = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__magic_name__ ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase_ = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
lowerCAmelCase_ = os.path.join(hf_cache_home, 'diffusers')
def snake_case( __magic_name__ = None , __magic_name__ = None ) -> None:
'''simple docstring'''
if new_cache_dir is None:
lowercase : str = DIFFUSERS_CACHE
if old_cache_dir is None:
lowercase : List[str] = old_diffusers_cache
lowercase : str = Path(__magic_name__ ).expanduser()
lowercase : Dict = Path(__magic_name__ ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
lowercase : List[Any] = new_cache_dir / old_blob_path.relative_to(__magic_name__ )
new_blob_path.parent.mkdir(parents=__magic_name__ , exist_ok=__magic_name__ )
os.replace(__magic_name__ , __magic_name__ )
try:
os.symlink(__magic_name__ , __magic_name__ )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCAmelCase_ = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
lowerCAmelCase_ = 0
else:
with open(cache_version_file) as f:
try:
lowerCAmelCase_ = int(f.read())
except ValueError:
lowerCAmelCase_ = 0
if cache_version < 1:
lowerCAmelCase_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
lowerCAmelCase_ = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def snake_case( __magic_name__ , __magic_name__ = None ) -> str:
'''simple docstring'''
if variant is not None:
lowercase : List[str] = weights_name.split('''.''' )
lowercase : Any = splits[:-1] + [variant] + splits[-1:]
lowercase : Tuple = '''.'''.join(__magic_name__ )
return weights_name
def snake_case( __magic_name__ , *,
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , ) -> Dict:
'''simple docstring'''
lowercase : Union[str, Any] = str(__magic_name__ )
if os.path.isfile(__magic_name__ ):
return pretrained_model_name_or_path
elif os.path.isdir(__magic_name__ ):
if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) ):
# Load from a PyTorch checkpoint
lowercase : Dict = os.path.join(__magic_name__ , __magic_name__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__magic_name__ , __magic_name__ , __magic_name__ ) ):
lowercase : str = os.path.join(__magic_name__ , __magic_name__ , __magic_name__ )
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__magic_name__ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
lowercase : int = hf_hub_download(
__magic_name__ , filename=_add_variant(__magic_name__ , __magic_name__ ) , cache_dir=__magic_name__ , force_download=__magic_name__ , proxies=__magic_name__ , resume_download=__magic_name__ , local_files_only=__magic_name__ , use_auth_token=__magic_name__ , user_agent=__magic_name__ , subfolder=__magic_name__ , revision=revision or commit_hash , )
warnings.warn(
F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __magic_name__ , )
return model_file
except: # noqa: E722
warnings.warn(
F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__magic_name__ , __magic_name__ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__magic_name__ , __magic_name__ )}' so that the correct variant file can be added.""" , __magic_name__ , )
try:
# 2. Load model file as usual
lowercase : Dict = hf_hub_download(
__magic_name__ , filename=__magic_name__ , cache_dir=__magic_name__ , force_download=__magic_name__ , proxies=__magic_name__ , resume_download=__magic_name__ , local_files_only=__magic_name__ , use_auth_token=__magic_name__ , user_agent=__magic_name__ , subfolder=__magic_name__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""" )
| 116
| 0
|
"""simple docstring"""
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowerCAmelCase__ ( *_UpperCamelCase : int ) -> Optional[int]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , 'r' ) as fh:
fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_EX )
try:
print(*_SCREAMING_SNAKE_CASE )
finally:
fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
SCREAMING_SNAKE_CASE__ = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
SCREAMING_SNAKE_CASE__ = torch.device("cuda", local_rank)
SCREAMING_SNAKE_CASE__ = socket.gethostname()
SCREAMING_SNAKE_CASE__ = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
SCREAMING_SNAKE_CASE__ = dist.get_rank()
SCREAMING_SNAKE_CASE__ = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 150
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase :
def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ):
'''simple docstring'''
__a : Optional[Any] = parent
__a : int = batch_size
__a : Any = num_channels
__a : Optional[int] = image_size
__a : Dict = patch_size
__a : int = is_training
__a : Union[str, Any] = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Dict = use_labels
__a : str = vocab_size
__a : List[Any] = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : str = num_attention_heads
__a : Union[str, Any] = intermediate_size
__a : Any = hidden_act
__a : List[str] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : List[Any] = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Any = type_sequence_label_size
__a : Optional[int] = initializer_range
__a : Any = coordinate_size
__a : List[Any] = shape_size
__a : Optional[int] = num_labels
__a : Dict = num_choices
__a : Union[str, Any] = scope
__a : Union[str, Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__a : Optional[int] = text_seq_length
__a : Any = (image_size // patch_size) ** 2 + 1
__a : Dict = self.text_seq_length + self.image_seq_length
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__a : Any = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__a : List[Any] = bbox[i, j, 3]
__a : Tuple = bbox[i, j, 1]
__a : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__a : int = bbox[i, j, 2]
__a : Dict = bbox[i, j, 0]
__a : int = tmp_coordinate
__a : Optional[int] = tf.constant(__a )
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : str = None
if self.use_input_mask:
__a : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__a : str = None
if self.use_token_type_ids:
__a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__a : Optional[Any] = None
__a : Optional[int] = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__a : int = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = TFLayoutLMvaModel(config=__a )
# text + image
__a : List[Any] = model(__a , pixel_values=__a , training=__a )
__a : Any = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , training=__a , )
__a : Optional[int] = model(__a , bbox=__a , pixel_values=__a , training=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__a : Any = model(__a , training=__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__a : str = model({'pixel_values': pixel_values} , training=__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = TFLayoutLMvaForSequenceClassification(config=__a )
__a : List[str] = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : str = self.num_labels
__a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=__a )
__a : List[str] = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = 2
__a : Any = TFLayoutLMvaForQuestionAnswering(config=__a )
__a : Any = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , training=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.prepare_config_and_inputs()
((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = config_and_inputs
__a : Any = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
A_ = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ):
'''simple docstring'''
return True
def __UpperCAmelCase ( self , __a , __a , __a=False ):
'''simple docstring'''
__a : str = copy.deepcopy(__a )
if model_class in get_values(__a ):
__a : str = {
k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__a , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__a ):
__a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : Union[str, Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = TFLayoutLMvaModelTester(self )
__a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__a )
if getattr(__a , 'hf_compute_loss' , __a ):
# The number of elements in the loss should be the same as the number of elements in the label
__a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0]
]
__a : Dict = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : Dict = prepared_for_class.pop('input_ids' )
__a : Tuple = model(__a , **__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__a : Union[str, Any] = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__a : List[Any] = -100
__a : List[str] = tf.convert_to_tensor(__a )
__a : Any = model(__a , **__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = model(__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
# Get keys that were added with the _prepare_for_class function
__a : Dict = prepared_for_class.keys() - inputs_dict.keys()
__a : Any = inspect.signature(model.call ).parameters
__a : str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__a : List[Any] = {0: 'input_ids'}
for label_key in label_keys:
__a : List[Any] = signature_names.index(__a )
__a : Union[str, Any] = label_key
__a : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__a : Union[str, Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__a : Optional[Any] = prepared_for_class[value]
__a : str = tuple(__a )
# Send to model
__a : Tuple = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a : Any = type
self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__a , __a , __a , __a , __a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__a , __a , __a , __a , __a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__a , __a , __a , __a , __a , __a , __a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : List[Any] = TFLayoutLMvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCamelCase ():
__a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__a : Tuple = self.default_image_processor
__a : List[Any] = prepare_img()
__a : int = image_processor(images=__a , return_tensors='tf' ).pixel_values
__a : Union[str, Any] = tf.constant([[1, 2]] )
__a : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__a : Tuple = model(input_ids=__a , bbox=__a , pixel_values=__a , training=__a )
# verify the logits
__a : List[Any] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , __a )
__a : Optional[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
| 27
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class __snake_case ( _lowercase):
snake_case__ : Any = "mra"
def __init__( self : List[str] , __lowerCAmelCase : List[Any]=5_0_2_6_5 , __lowerCAmelCase : List[Any]=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : Union[str, Any]=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Dict=5_1_2 , __lowerCAmelCase : str=1 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1E-5 , __lowerCAmelCase : Any="absolute" , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : Optional[int]="full" , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Tuple=2 , **__lowerCAmelCase : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : int = vocab_size
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Dict = num_attention_heads
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Tuple = type_vocab_size
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Optional[int] = position_embedding_type
_lowerCamelCase : Dict = block_per_row
_lowerCamelCase : int = approx_mode
_lowerCamelCase : Dict = initial_prior_first_n_blocks
_lowerCamelCase : List[str] = initial_prior_diagonal_n_blocks
| 175
|
"""simple docstring"""
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCAmelCase__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __snake_case ( datasets.BuilderConfig):
snake_case__ : Optional[datasets.Features] = None
def snake_case_ ( A_ : "pyspark.sql.DataFrame", A_ : List[int], ):
'''simple docstring'''
import pyspark
def generate_fn():
_lowerCamelCase : int = df.select('''*''', pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
_lowerCamelCase : Any = df_with_partition_id.select('''*''' ).where(F'''part_id = {partition_id}''' ).drop('''part_id''' )
_lowerCamelCase : Optional[int] = partition_df.collect()
_lowerCamelCase : List[str] = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __snake_case ( _BaseExamplesIterable):
def __init__( self : Tuple , __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : Optional[int]=None , ):
"""simple docstring"""
_lowerCamelCase : Dict = df
_lowerCamelCase : Union[str, Any] = partition_order or range(self.df.rdd.getNumPartitions() )
_lowerCamelCase : Dict = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
"""simple docstring"""
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : np.random.Generator ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.split_shard_indices_by_worker(__lowerCAmelCase , __lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return len(self.partition_order )
class __snake_case ( datasets.DatasetBuilder):
snake_case__ : List[Any] = SparkConfig
def __init__( self : Union[str, Any] , __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : str = None , __lowerCAmelCase : str = None , **__lowerCAmelCase : int , ):
"""simple docstring"""
import pyspark
_lowerCamelCase : Optional[int] = pyspark.sql.SparkSession.builder.getOrCreate()
_lowerCamelCase : int = df
_lowerCamelCase : Any = working_dir
super().__init__(
cache_dir=__lowerCAmelCase , config_name=str(self.df.semanticHash() ) , **__lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
def create_cache_and_write_probe(__lowerCAmelCase : Optional[int] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__lowerCAmelCase , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_lowerCamelCase : Optional[Any] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowerCAmelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : datasets.download.download_manager.DownloadManager ):
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
import pyspark
def get_arrow_batch_size(__lowerCAmelCase : Dict ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
_lowerCamelCase : Any = self.df.count()
_lowerCamelCase : Union[str, Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_lowerCamelCase : List[Any] = (
self.df.limit(__lowerCAmelCase )
.repartition(1 )
.mapInArrow(__lowerCAmelCase , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_lowerCamelCase : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_lowerCamelCase : List[str] = min(__lowerCAmelCase , int(approx_total_size / max_shard_size ) )
_lowerCamelCase : Optional[int] = self.df.repartition(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int , ):
"""simple docstring"""
import pyspark
_lowerCamelCase : Optional[Any] = ParquetWriter if file_format == '''parquet''' else ArrowWriter
_lowerCamelCase : List[Any] = os.path.join(self._working_dir , os.path.basename(__lowerCAmelCase ) ) if self._working_dir else fpath
_lowerCamelCase : Dict = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_lowerCamelCase : str = self.config.features
_lowerCamelCase : Dict = self._writer_batch_size
_lowerCamelCase : List[str] = self._fs.storage_options
def write_arrow(__lowerCAmelCase : List[str] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_lowerCamelCase : List[str] = pyspark.TaskContext().taskAttemptId()
_lowerCamelCase : Any = next(__lowerCAmelCase , __lowerCAmelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : Optional[int] = writer_class(
features=__lowerCAmelCase , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , )
_lowerCamelCase : int = pa.Table.from_batches([first_batch] )
writer.write_table(__lowerCAmelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowerCamelCase , _lowerCamelCase : Any = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
_lowerCamelCase : Optional[int] = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , )
_lowerCamelCase : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(__lowerCAmelCase )
if writer._num_bytes > 0:
_lowerCamelCase , _lowerCamelCase : Optional[int] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__lowerCAmelCase ) ):
_lowerCamelCase : Optional[Any] = os.path.join(os.path.dirname(__lowerCAmelCase ) , os.path.basename(__lowerCAmelCase ) )
shutil.move(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = (
self.df.mapInArrow(__lowerCAmelCase , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : "datasets.SplitGenerator" , __lowerCAmelCase : str = "arrow" , __lowerCAmelCase : Optional[Union[str, int]] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : Tuple , ):
"""simple docstring"""
self._validate_cache_dir()
_lowerCamelCase : str = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__lowerCAmelCase )
_lowerCamelCase : str = not is_remote_filesystem(self._fs )
_lowerCamelCase : Tuple = os.path.join if is_local else posixpath.join
_lowerCamelCase : int = '''-TTTTT-SSSSS-of-NNNNN'''
_lowerCamelCase : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
_lowerCamelCase : List[Any] = path_join(self._output_dir , __lowerCAmelCase )
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : Any = 0
_lowerCamelCase : str = 0
_lowerCamelCase : int = []
_lowerCamelCase : List[str] = []
for task_id, content in self._prepare_split_single(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) : str = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__lowerCAmelCase )
_lowerCamelCase : int = total_num_examples
_lowerCamelCase : str = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
_lowerCamelCase : Optional[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_lowerCamelCase : str = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
rename(
__lowerCAmelCase , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Any = 0
for i in range(len(__lowerCAmelCase ) ):
_lowerCamelCase , _lowerCamelCase : Dict = task_id_and_num_shards[i]
for shard_id in range(__lowerCAmelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__lowerCAmelCase , len(__lowerCAmelCase ) ).map(lambda __lowerCAmelCase : _rename_shard(*__lowerCAmelCase ) ).collect()
else:
# don't use any pattern
_lowerCamelCase : Any = 0
_lowerCamelCase : List[str] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(__lowerCAmelCase , '''''' ) , )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : "datasets.SplitGenerator" , ):
"""simple docstring"""
return SparkExamplesIterable(self.df )
| 175
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
__lowerCAmelCase = """beit"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[Any]=8192 , lowerCamelCase_ : Dict=768 , lowerCamelCase_ : Dict=12 , lowerCamelCase_ : Optional[Any]=12 , lowerCamelCase_ : int=3072 , lowerCamelCase_ : str="gelu" , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Any=0.0_2 , lowerCamelCase_ : Any=1E-12 , lowerCamelCase_ : str=224 , lowerCamelCase_ : Optional[int]=16 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Any=False , lowerCamelCase_ : str=False , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Optional[Any]=[3, 5, 7, 11] , lowerCamelCase_ : Optional[Any]=[1, 2, 3, 6] , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Optional[Any]=0.4 , lowerCamelCase_ : Optional[Any]=256 , lowerCamelCase_ : str=1 , lowerCamelCase_ : str=False , lowerCamelCase_ : Union[str, Any]=255 , **lowerCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = use_mask_token
UpperCamelCase = use_absolute_position_embeddings
UpperCamelCase = use_relative_position_bias
UpperCamelCase = use_shared_relative_position_bias
UpperCamelCase = layer_scale_init_value
UpperCamelCase = drop_path_rate
UpperCamelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
UpperCamelCase = out_indices
UpperCamelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
UpperCamelCase = use_auxiliary_head
UpperCamelCase = auxiliary_loss_weight
UpperCamelCase = auxiliary_channels
UpperCamelCase = auxiliary_num_convs
UpperCamelCase = auxiliary_concat_input
UpperCamelCase = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
__lowerCAmelCase = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return 1E-4
| 343
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( snake_case_ ):
lowercase = 'megatron-bert'
def __init__( self : List[str] , snake_case : Tuple=2_9_0_5_6 , snake_case : Dict=1_0_2_4 , snake_case : Dict=2_4 , snake_case : Union[str, Any]=1_6 , snake_case : Optional[int]=4_0_9_6 , snake_case : Optional[int]="gelu" , snake_case : Any=0.1 , snake_case : Tuple=0.1 , snake_case : Optional[int]=5_1_2 , snake_case : List[Any]=2 , snake_case : Tuple=0.02 , snake_case : Optional[Any]=1e-12 , snake_case : str=0 , snake_case : Optional[int]="absolute" , snake_case : Union[str, Any]=True , **snake_case : Any , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=snake_case , **snake_case )
UpperCamelCase_ : Optional[Any] = vocab_size
UpperCamelCase_ : Any = hidden_size
UpperCamelCase_ : Union[str, Any] = num_hidden_layers
UpperCamelCase_ : List[Any] = num_attention_heads
UpperCamelCase_ : str = hidden_act
UpperCamelCase_ : List[str] = intermediate_size
UpperCamelCase_ : List[Any] = hidden_dropout_prob
UpperCamelCase_ : Any = attention_probs_dropout_prob
UpperCamelCase_ : Tuple = max_position_embeddings
UpperCamelCase_ : Dict = type_vocab_size
UpperCamelCase_ : Optional[int] = initializer_range
UpperCamelCase_ : Optional[Any] = layer_norm_eps
UpperCamelCase_ : Dict = position_embedding_type
UpperCamelCase_ : List[str] = use_cache
| 175
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , snake_case : str , snake_case : Optional[Any]=7 , snake_case : Dict=3 , snake_case : List[Any]=30 , snake_case : Union[str, Any]=400 , snake_case : Optional[Any]=True , snake_case : Union[str, Any]=None , snake_case : List[str]=0.9 , snake_case : str=None , snake_case : Dict=True , snake_case : str=[0.5, 0.5, 0.5] , snake_case : List[Any]=[0.5, 0.5, 0.5] , ) -> Optional[int]:
__UpperCAmelCase : List[str] = size if size is not None else {'''shortest_edge''': 30}
__UpperCAmelCase : Any = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : List[Any] = num_channels
__UpperCAmelCase : List[str] = min_resolution
__UpperCAmelCase : Tuple = max_resolution
__UpperCAmelCase : Tuple = do_resize_and_center_crop
__UpperCAmelCase : str = size
__UpperCAmelCase : str = crop_pct
__UpperCAmelCase : Union[str, Any] = crop_size
__UpperCAmelCase : Dict = do_normalize
__UpperCAmelCase : int = image_mean
__UpperCAmelCase : Optional[int] = image_std
def lowerCamelCase__ ( self : Tuple ) -> Optional[Any]:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a ( _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : Optional[Any] = PoolFormerImageProcessingTester(self )
@property
def lowerCamelCase__ ( self : int ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : Tuple ) -> Any:
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , '''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(snake_case , '''size''' ) )
self.assertTrue(hasattr(snake_case , '''crop_pct''' ) )
self.assertTrue(hasattr(snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(snake_case , '''image_mean''' ) )
self.assertTrue(hasattr(snake_case , '''image_std''' ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
__UpperCAmelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
pass
def lowerCamelCase__ ( self : Any ) -> List[Any]:
# Initialize image_processing
__UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
__UpperCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__UpperCAmelCase : Optional[Any] = image_processing(snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
# Initialize image_processing
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
__UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__UpperCAmelCase : Optional[Any] = image_processing(snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
# Initialize image_processing
__UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
__UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__UpperCAmelCase : str = image_processing(snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 351
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
__UpperCAmelCase :Dict = logging.get_logger(__name__)
__UpperCAmelCase :str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase :Optional[int] = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase :Union[str, Any] = {
"bert-base-uncased": 5_1_2,
"bert-large-uncased": 5_1_2,
"bert-base-cased": 5_1_2,
"bert-large-cased": 5_1_2,
"bert-base-multilingual-uncased": 5_1_2,
"bert-base-multilingual-cased": 5_1_2,
"bert-base-chinese": 5_1_2,
"bert-base-german-cased": 5_1_2,
"bert-large-uncased-whole-word-masking": 5_1_2,
"bert-large-cased-whole-word-masking": 5_1_2,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-base-cased-finetuned-mrpc": 5_1_2,
"bert-base-german-dbmdz-cased": 5_1_2,
"bert-base-german-dbmdz-uncased": 5_1_2,
"TurkuNLP/bert-base-finnish-cased-v1": 5_1_2,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_1_2,
"wietsedv/bert-base-dutch-cased": 5_1_2,
}
__UpperCAmelCase :str = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Optional[Any] = BertTokenizer
def __init__( self : Optional[int] , snake_case : Union[str, Any]=None , snake_case : str=None , snake_case : Any=True , snake_case : Tuple="[UNK]" , snake_case : int="[SEP]" , snake_case : Optional[Any]="[PAD]" , snake_case : int="[CLS]" , snake_case : Optional[Any]="[MASK]" , snake_case : Union[str, Any]=True , snake_case : List[Any]=None , **snake_case : int , ) -> str:
super().__init__(
snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , )
__UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , snake_case ) != tokenize_chinese_chars
):
__UpperCAmelCase : List[Any] = getattr(snake_case , normalizer_state.pop('''type''' ) )
__UpperCAmelCase : List[Any] = do_lower_case
__UpperCAmelCase : List[Any] = strip_accents
__UpperCAmelCase : str = tokenize_chinese_chars
__UpperCAmelCase : List[str] = normalizer_class(**snake_case )
__UpperCAmelCase : Optional[int] = do_lower_case
def lowerCamelCase__ ( self : List[Any] , snake_case : Tuple , snake_case : List[str]=None ) -> str:
__UpperCAmelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__ ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : List[str] = [self.sep_token_id]
__UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : List[Any] , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase : Optional[int] = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
| 240
| 0
|
from __future__ import annotations
def lowerCamelCase__ ( snake_case_ : list[int] ) -> list[int]: # This function is recursive
__snake_case = len(snake_case_ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
__snake_case = array[0]
__snake_case = False
__snake_case = 1
__snake_case = []
while not is_found and i < array_length:
if array[i] < pivot:
__snake_case = True
__snake_case = [element for element in array[i:] if element >= array[i]]
__snake_case = longest_subsequence(snake_case_ )
if len(snake_case_ ) > len(snake_case_ ):
__snake_case = temp_array
else:
i += 1
__snake_case = [element for element in array[1:] if element >= pivot]
__snake_case = [pivot, *longest_subsequence(snake_case_ )]
if len(snake_case_ ) > len(snake_case_ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ (__a : Optional[Any] , __a : str , __a : Optional[Any]=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
_a : str = nn.Parameter(__a )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
_a : Any = nn.Parameter(__a )
def UpperCAmelCase_ (__a : int , __a : Optional[Any] , __a : int ):
"""simple docstring"""
_a : Tuple = np.asarray(weights[0] )
_a : Union[str, Any] = np.asarray(weights[1] )
_a : Dict = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase_ (__a : Optional[Any] , __a : Optional[int] , __a : List[str] ):
"""simple docstring"""
_a : Dict = np.asarray(weights[0] )
_a : Union[str, Any] = np.asarray(weights[1] )
_a : str = np.asarray(weights[2] )
_a : int = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase_ (__a : Any , __a : Any , __a : Optional[Any] ):
"""simple docstring"""
_a : List[str] = weights[0][0][0]
_a : List[Any] = np.asarray(layer_norm_a[0] )
_a : List[str] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# lsh weights + output
_a : List[str] = weights[0][1]
if len(__a ) < 4:
set_layer_weights_in_torch_lsh(__a , torch_block.attention , __a )
else:
set_layer_weights_in_torch_local(__a , torch_block.attention , __a )
# intermediate weighs
_a : Optional[Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(__a ) == 4:
_a : Union[str, Any] = intermediate_weights[2]
# layernorm 2
_a : Any = np.asarray(intermediate_weights[0][0] )
_a : List[Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# intermediate dense
_a : Any = np.asarray(intermediate_weights[1][0] )
_a : Any = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
# intermediate out
_a : Optional[int] = np.asarray(intermediate_weights[4][0] )
_a : int = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def UpperCAmelCase_ (__a : Dict , __a : Dict , __a : List[Any] ):
"""simple docstring"""
_a : Optional[int] = torch_model.reformer
# word embeds
_a : Tuple = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__a ) , )
if isinstance(weights[3] , __a ):
_a : Any = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_a : List[Any] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
_a : Any = nn.Parameter(torch.tensor(__a ) )
_a : List[str] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__a ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_a : Tuple = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__a , __a , __a )
# output layer norm
_a : Optional[Any] = np.asarray(weights[7][0] )
_a : int = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# output embeddings
_a : List[str] = np.asarray(weights[9][0] )
_a : int = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def UpperCAmelCase_ (__a : Tuple , __a : Optional[Any] , __a : Dict ):
"""simple docstring"""
_a : List[Any] = ReformerConfig.from_json_file(__a )
print(f"""Building PyTorch model from configuration: {config}""" )
_a : int = ReformerModelWithLMHead(__a )
with open(__a , 'rb' ) as f:
_a : Optional[Any] = pickle.load(__a )['weights']
set_model_weights_in_torch(__a , __a , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 271
| 0
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowercase )
class UpperCAmelCase_ ( __lowercase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCamelCase : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCamelCase : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
lowerCamelCase : ClassVar[Features] = Features({'''labels''': ClassLabel} )
lowerCamelCase : str = "text"
lowerCamelCase : str = "labels"
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : Optional[int] ) -> Tuple:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , UpperCAmelCase__ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
lowerCAmelCase = copy.deepcopy(self )
lowerCAmelCase = self.label_schema.copy()
lowerCAmelCase = features[self.label_column]
lowerCAmelCase = label_schema
return task_template
@property
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 55
|
'''simple docstring'''
from __future__ import annotations
def a_ ( lowerCamelCase : list[float] , lowerCamelCase : list[float] ):
lowerCAmelCase = sorted(numsa + numsa )
lowerCAmelCase , lowerCAmelCase = divmod(len(lowerCamelCase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case =[float(x) for x in input("""Enter the elements of first array: """).split()]
__snake_case =[float(x) for x in input("""Enter the elements of second array: """).split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 55
| 1
|
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = checkpoints.load_tax_checkpoint(_A )
lowerCAmelCase_ = flatten_dict(_A )
return flax_params
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
lowerCAmelCase_ = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowerCAmelCase_ = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowerCAmelCase_ = new_key.replace(_A , _A )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowerCAmelCase_ = new_key.replace(_A , _A )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A )
lowerCAmelCase_ = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A )
lowerCAmelCase_ = flax_dict[key]
lowerCAmelCase_ = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowerCAmelCase_ = torch.from_numpy(converted_dict[key].T )
else:
lowerCAmelCase_ = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __UpperCamelCase ( _A , _A , _A=False , _A=False ):
lowerCAmelCase_ = get_flax_param(_A )
if not use_large:
lowerCAmelCase_ = PixaStructVisionConfig()
lowerCAmelCase_ = PixaStructTextConfig()
else:
lowerCAmelCase_ = PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
lowerCAmelCase_ = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
lowerCAmelCase_ = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_A )
lowerCAmelCase_ = PixaStructForConditionalGeneration(_A )
lowerCAmelCase_ = rename_and_convert_flax_params(_A )
model.load_state_dict(_A )
lowerCAmelCase_ = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
lowerCAmelCase_ = PixaStructImageProcessor()
lowerCAmelCase_ = PixaStructProcessor(image_processor=_A , tokenizer=_A )
if use_large:
lowerCAmelCase_ = 4096
lowerCAmelCase_ = True
# mkdir if needed
os.makedirs(_A , exist_ok=_A )
model.save_pretrained(_A )
processor.save_pretrained(_A )
print('''Model saved in {}'''.format(_A ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 278
|
from functools import lru_cache
@lru_cache
def __UpperCamelCase ( _A ):
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 278
| 1
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__a :Tuple = logging.get_logger(__name__)
class _a ( enum.Enum ):
"""simple docstring"""
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = 1
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 'generated'
def __init__( self : List[Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : str ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __A ( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : Any , ):
A_ = {}
if truncation is not None:
A_ = truncation
A_ = generate_kwargs
A_ = {}
if return_tensors is not None and return_type is None:
A_ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
A_ = return_type
if clean_up_tokenization_spaces is not None:
A_ = clean_up_tokenization_spaces
if stop_sequence is not None:
A_ = self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
if len(UpperCAmelCase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
A_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __A ( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
return True
def __A ( self : Dict , *UpperCAmelCase : Tuple , UpperCAmelCase : List[str] ):
A_ = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , UpperCAmelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
A_ = ([prefix + arg for arg in args[0]],)
A_ = True
elif isinstance(args[0] , UpperCAmelCase ):
A_ = (prefix + args[0],)
A_ = False
else:
raise ValueError(
f''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
A_ = self.tokenizer(*UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
A_ = super().__call__(*UpperCAmelCase , **UpperCAmelCase )
if (
isinstance(args[0] , UpperCAmelCase )
and all(isinstance(UpperCAmelCase , UpperCAmelCase ) for el in args[0] )
and all(len(UpperCAmelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __A ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCAmelCase : str ):
A_ = self._parse_and_tokenize(UpperCAmelCase , truncation=UpperCAmelCase , **UpperCAmelCase )
return inputs
def __A ( self : Dict , UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
if self.framework == "pt":
A_ , A_ = model_inputs["input_ids"].shape
elif self.framework == "tf":
A_ , A_ = tf.shape(model_inputs["input_ids"] ).numpy()
A_ = generate_kwargs.get("min_length" , self.model.config.min_length )
A_ = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(UpperCAmelCase , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
A_ = self.model.generate(**UpperCAmelCase , **UpperCAmelCase )
A_ = output_ids.shape[0]
if self.framework == "pt":
A_ = output_ids.reshape(UpperCAmelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
A_ = tf.reshape(UpperCAmelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=ReturnType.TEXT , UpperCAmelCase : Tuple=False ):
A_ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
A_ = {f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
A_ = {
f'''{self.return_name}_text''': self.tokenizer.decode(
UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , )
}
records.append(UpperCAmelCase )
return records
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = 'summary'
def __call__( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : List[str] ):
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
if max_length < min_length:
logger.warning(f'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = 'translation'
def __A ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def __A ( self : str , *UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=TruncationStrategy.DO_NOT_TRUNCATE , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Dict=None ):
if getattr(self.tokenizer , "_build_translation_inputs" , UpperCAmelCase ):
return self.tokenizer._build_translation_inputs(
*UpperCAmelCase , return_tensors=self.framework , truncation=UpperCAmelCase , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase )
else:
return super()._parse_and_tokenize(*UpperCAmelCase , truncation=UpperCAmelCase )
def __A ( self : Optional[int] , UpperCAmelCase : str=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ):
A_ , A_ , A_ = super()._sanitize_parameters(**UpperCAmelCase )
if src_lang is not None:
A_ = src_lang
if tgt_lang is not None:
A_ = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
A_ = kwargs.get("task" , self.task )
A_ = task.split("_" )
if task and len(UpperCAmelCase ) == 4:
# translation, XX, to YY
A_ = items[1]
A_ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *UpperCAmelCase : Any , **UpperCAmelCase : Any ):
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
| 329
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
__a :Any = {'vocab_file': 'vocab.txt'}
__a :Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__a :List[str] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__a :List[str] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ConvBertTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ):
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 329
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowerCAmelCase__ : str =None
lowerCAmelCase__ : str =logging.get_logger(__name__)
lowerCAmelCase__ : Dict ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : Optional[Any] ={
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Tuple ={
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
lowerCAmelCase__ : List[str] ='''▁'''
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Dict = VOCAB_FILES_NAMES
UpperCamelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Optional[int] = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ : List[str] = BarthezTokenizer
def __init__( self , _A=None , _A=None , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , **_A , )
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
def _A ( self , _A , _A = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _A ( self , _A , _A = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self , _A , _A = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,)
| 257
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = MvpTokenizer
UpperCamelCase__ : Tuple = MvpTokenizerFast
UpperCamelCase__ : int = True
UpperCamelCase__ : Tuple = filter_roberta_detectors
def _A ( self ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__SCREAMING_SNAKE_CASE = dict(zip(_A , range(len(_A ) ) ) )
__SCREAMING_SNAKE_CASE = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__SCREAMING_SNAKE_CASE = {'unk_token': '<unk>'}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
def _A ( self , **_A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def _A ( self , **_A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def _A ( self , _A ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def _A ( self ):
'''simple docstring'''
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def _A ( self ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__SCREAMING_SNAKE_CASE = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__SCREAMING_SNAKE_CASE = tokenizer(_A , max_length=len(_A ) , padding=_A , return_tensors='pt' )
self.assertIsInstance(_A , _A )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__SCREAMING_SNAKE_CASE = batch.input_ids.tolist()[0]
self.assertListEqual(_A , _A )
# Test that special tokens are reset
@require_torch
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__SCREAMING_SNAKE_CASE = tokenizer(_A , padding=_A , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , _A )
self.assertIn('attention_mask' , _A )
self.assertNotIn('labels' , _A )
self.assertNotIn('decoder_attention_mask' , _A )
@require_torch
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__SCREAMING_SNAKE_CASE = tokenizer(text_target=_A , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def _A ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__SCREAMING_SNAKE_CASE = tokenizer(
['I am a small frog' * 1_024, 'I am a small frog'] , padding=_A , truncation=_A , return_tensors='pt' )
self.assertIsInstance(_A , _A )
self.assertEqual(batch.input_ids.shape , (2, 1_024) )
@require_torch
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ['A long paragraph for summarization.']
__SCREAMING_SNAKE_CASE = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__SCREAMING_SNAKE_CASE = tokenizer(_A , text_target=_A , return_tensors='pt' )
__SCREAMING_SNAKE_CASE = inputs['input_ids']
__SCREAMING_SNAKE_CASE = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def _A ( self ):
'''simple docstring'''
pass
def _A ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE = 'A, <mask> AllenNLP sentence.'
__SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A )
__SCREAMING_SNAKE_CASE = tokenizer_p.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
__SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
__SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
_A , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_A , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 257
| 1
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ = "▁" , A_ = True , A_ = "<unk>" , A_ = "</s>" , A_ = "<pad>" , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
UpperCamelCase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
UpperCamelCase = token_dict['token']
UpperCamelCase = Tokenizer(Unigram() )
UpperCamelCase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
UpperCamelCase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=A_ , add_prefix_space=A_ ),
pre_tokenizers.Digits(individual_digits=A_ ),
pre_tokenizers.Punctuation(),
] )
UpperCamelCase = decoders.Metaspace(replacement=A_ , add_prefix_space=A_ )
UpperCamelCase = TemplateProcessing(
single=F'''$A {self.special_tokens['eos']['token']}''' , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
UpperCamelCase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(A_ , A_ )
def __UpperCamelCase ( self , A_ , A_ = 8_000 , A_ = True , ) -> List[str]:
"""simple docstring"""
UpperCamelCase = trainers.UnigramTrainer(
vocab_size=A_ , special_tokens=self.special_tokens_list , show_progress=A_ , )
if isinstance(A_ , A_ ):
UpperCamelCase = [files]
self._tokenizer.train(A_ , trainer=A_ )
self.add_unk_id()
def __UpperCamelCase ( self , A_ , A_ = 8_000 , A_ = True , ) -> Any:
"""simple docstring"""
UpperCamelCase = trainers.UnigramTrainer(
vocab_size=A_ , special_tokens=self.special_tokens_list , show_progress=A_ , )
self._tokenizer.train_from_iterator(A_ , trainer=A_ )
self.add_unk_id()
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = json.loads(self._tokenizer.to_str() )
UpperCamelCase = self.special_tokens['unk']['id']
UpperCamelCase = Tokenizer.from_str(json.dumps(A_ ) )
| 366
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 110
| 0
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[Any]:
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : int = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : str = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : int = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : List[Any] = 2
# Initialize accelerator
lowercase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : str = config['''lr''']
lowercase__ : str = int(config['''num_epochs'''] )
lowercase__ : Optional[int] = int(config['''seed'''] )
lowercase__ : Tuple = int(config['''batch_size'''] )
lowercase__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : List[str] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[Any] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : Dict = model(**__lowerCamelCase )
lowercase__ : List[Any] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Any = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
| 16
|
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : str = [[float('''inf''' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
lowercase__ : List[str] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__lowerCamelCase ):
# looping through rows of graph array
for i in range(__lowerCamelCase ):
# looping through columns of graph array
for j in range(__lowerCamelCase ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowercase__ : str = dist[i][k] + dist[k][j]
_print_dist(__lowerCamelCase , __lowerCamelCase )
return dist, v
if __name__ == "__main__":
lowerCAmelCase_ = int(input('Enter number of vertices: '))
lowerCAmelCase_ = int(input('Enter number of edges: '))
lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
lowerCAmelCase_ = int(input('Enter source:'))
lowerCAmelCase_ = int(input('Enter destination:'))
lowerCAmelCase_ = float(input('Enter weight:'))
lowerCAmelCase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 16
| 1
|
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _snake_case ( _snake_case : int ) -> Any:
'''simple docstring'''
random.seed(_snake_case )
np.random.seed(_snake_case )
torch.manual_seed(_snake_case )
torch.cuda.manual_seed_all(_snake_case )
# ^^ safe to call this function even if cuda is not available
class lowercase_ :
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : Iterable[torch.nn.Parameter] , _UpperCAmelCase : float = 0.9999 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[float, int] = 1.0 , _UpperCAmelCase : Union[float, int] = 2 / 3 , _UpperCAmelCase : Optional[Any] = None , _UpperCAmelCase : Dict[str, Any] = None , **_UpperCAmelCase : Optional[int] , ):
if isinstance(_UpperCAmelCase , torch.nn.Module ):
_A = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase , )
_A = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_A = True
if kwargs.get('max_value' , _UpperCAmelCase ) is not None:
_A = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
_A = kwargs['max_value']
if kwargs.get('min_value' , _UpperCAmelCase ) is not None:
_A = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
_A = kwargs['min_value']
_A = list(_UpperCAmelCase )
_A = [p.clone().detach() for p in parameters]
if kwargs.get('device' , _UpperCAmelCase ) is not None:
_A = 'The `device` argument is deprecated. Please use `to` instead.'
deprecate('device' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
self.to(device=kwargs['device'] )
_A = None
_A = decay
_A = min_decay
_A = update_after_step
_A = use_ema_warmup
_A = inv_gamma
_A = power
_A = 0
_A = None # set in `step()`
_A = model_cls
_A = model_config
@classmethod
def lowerCAmelCase_ ( cls : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ):
_A , _A = model_cls.load_config(_UpperCAmelCase , return_unused_kwargs=_UpperCAmelCase )
_A = model_cls.from_pretrained(_UpperCAmelCase )
_A = cls(model.parameters() , model_cls=_UpperCAmelCase , model_config=model.config )
ema_model.load_state_dict(_UpperCAmelCase )
return ema_model
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple ):
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
_A = self.model_cls.from_config(self.model_config )
_A = self.state_dict()
state_dict.pop('shadow_params' , _UpperCAmelCase )
model.register_to_config(**_UpperCAmelCase )
self.copy_to(model.parameters() )
model.save_pretrained(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : int ):
_A = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_A = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_A = (1 + step) / (10 + step)
_A = min(_UpperCAmelCase , self.decay )
# make sure decay is not smaller than min_decay
_A = max(_UpperCAmelCase , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Iterable[torch.nn.Parameter] ):
if isinstance(_UpperCAmelCase , torch.nn.Module ):
_A = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase , )
_A = parameters.parameters()
_A = list(_UpperCAmelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_A = self.get_decay(self.optimization_step )
_A = decay
_A = 1 - decay
_A = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _UpperCAmelCase ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_A = deepspeed.zero.GatheredParameters(_UpperCAmelCase , modifier_rank=_UpperCAmelCase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Iterable[torch.nn.Parameter] ):
_A = list(_UpperCAmelCase )
for s_param, param in zip(self.shadow_params , _UpperCAmelCase ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Dict=None ):
_A = [
p.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ) if p.is_floating_point() else p.to(device=_UpperCAmelCase )
for p in self.shadow_params
]
def lowerCAmelCase_ ( self : Dict ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Iterable[torch.nn.Parameter] ):
_A = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params , _UpperCAmelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
_A = None
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : dict ):
_A = copy.deepcopy(_UpperCAmelCase )
_A = state_dict.get('decay' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
_A = state_dict.get('min_decay' , self.min_decay )
if not isinstance(self.min_decay , _UpperCAmelCase ):
raise ValueError('Invalid min_decay' )
_A = state_dict.get('optimization_step' , self.optimization_step )
if not isinstance(self.optimization_step , _UpperCAmelCase ):
raise ValueError('Invalid optimization_step' )
_A = state_dict.get('update_after_step' , self.update_after_step )
if not isinstance(self.update_after_step , _UpperCAmelCase ):
raise ValueError('Invalid update_after_step' )
_A = state_dict.get('use_ema_warmup' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _UpperCAmelCase ):
raise ValueError('Invalid use_ema_warmup' )
_A = state_dict.get('inv_gamma' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('Invalid inv_gamma' )
_A = state_dict.get('power' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('Invalid power' )
_A = state_dict.get('shadow_params' , _UpperCAmelCase )
if shadow_params is not None:
_A = shadow_params
if not isinstance(self.shadow_params , _UpperCAmelCase ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(_UpperCAmelCase , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' )
| 271
|
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = (IPNDMScheduler,)
UpperCAmelCase : Optional[Any] = (('''num_inference_steps''', 50),)
def lowerCAmelCase_ ( self : Union[str, Any] , **_UpperCAmelCase : List[Any] ):
_A = {'num_train_timesteps': 1_000}
config.update(**_UpperCAmelCase )
return config
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] ):
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop('num_inference_steps' , _UpperCAmelCase )
_A = self.dummy_sample
_A = 0.1 * sample
_A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config(**_UpperCAmelCase )
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
_A = dummy_past_residuals[:]
if time_step is None:
_A = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
_A = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
_A = dummy_past_residuals[:]
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self : str ):
pass
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Any=0 , **_UpperCAmelCase : Any ):
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop('num_inference_steps' , _UpperCAmelCase )
_A = self.dummy_sample
_A = 0.1 * sample
_A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_A = dummy_past_residuals[:]
if time_step is None:
_A = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
_A = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_A = dummy_past_residuals[:]
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self : List[str] , **_UpperCAmelCase : Optional[int] ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(**_UpperCAmelCase )
_A = scheduler_class(**_UpperCAmelCase )
_A = 10
_A = self.dummy_model()
_A = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
return sample
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop('num_inference_steps' , _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
_A = self.dummy_sample
_A = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase , 'set_timesteps' ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , 'set_timesteps' ):
_A = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_A = dummy_past_residuals[:]
_A = scheduler.timesteps[5]
_A = scheduler.timesteps[6]
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase_ ( self : Tuple ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase , time_step=_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase , time_step=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = self.full_loop()
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 271
| 1
|
'''simple docstring'''
def UpperCamelCase_ ( A__ : List[str]=2_81_23 ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
lowerCAmelCase_ : Union[str, Any] = set()
lowerCAmelCase_ : Any = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(A__ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
| 120
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
pass
class __snake_case :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase : Any ) -> None:
lowerCAmelCase_ : Any = data
lowerCAmelCase_ : Node | None = None
def __iter__( self : Union[str, Any] ) -> Optional[Any]:
lowerCAmelCase_ : Union[str, Any] = self
lowerCAmelCase_ : Any = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowerCamelCase )
yield node.data
lowerCAmelCase_ : int = node.next_node
@property
def __lowercase ( self : str ) -> bool:
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__A : Dict = Node(1)
__A : Optional[Any] = Node(2)
__A : int = Node(3)
__A : Optional[Any] = Node(4)
print(root_node.has_loop) # False
__A : Any = root_node.next_node
print(root_node.has_loop) # True
__A : List[Any] = Node(5)
__A : Dict = Node(6)
__A : str = Node(5)
__A : Dict = Node(6)
print(root_node.has_loop) # False
__A : Optional[int] = Node(1)
print(root_node.has_loop) # False
| 120
| 1
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_snake_case = logging.get_logger(__name__)
_snake_case = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class UpperCamelCase :
def __init__( self : Dict , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int ) -> str:
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
_a : Optional[Any] = model
_a : List[Any] = kwargs.get("""model_save_dir""" , UpperCAmelCase__ )
_a : List[str] = kwargs.get("""latest_model_name""" , UpperCAmelCase__ )
def __call__( self : Optional[int] , **UpperCAmelCase__ : List[Any] ) -> Optional[Any]:
_a : str = {k: np.array(UpperCAmelCase__ ) for k, v in kwargs.items()}
return self.model.run(UpperCAmelCase__ , UpperCAmelCase__ )
@staticmethod
def _lowercase ( UpperCAmelCase__ : Union[str, Path] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[Any]=None ) -> Tuple:
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
_a : Any = """CPUExecutionProvider"""
return ort.InferenceSession(UpperCAmelCase__ , providers=[provider] , sess_options=UpperCAmelCase__ )
def _lowercase ( self : Tuple , UpperCAmelCase__ : Union[str, Path] , UpperCAmelCase__ : Optional[str] = None , **UpperCAmelCase__ : Optional[Any] ) -> int:
_a : Union[str, Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
_a : List[Any] = self.model_save_dir.joinpath(self.latest_model_name )
_a : Any = Path(UpperCAmelCase__ ).joinpath(UpperCAmelCase__ )
try:
shutil.copyfile(UpperCAmelCase__ , UpperCAmelCase__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
_a : List[Any] = self.model_save_dir.joinpath(UpperCAmelCase__ )
if src_path.exists():
_a : str = Path(UpperCAmelCase__ ).joinpath(UpperCAmelCase__ )
try:
shutil.copyfile(UpperCAmelCase__ , UpperCAmelCase__ )
except shutil.SameFileError:
pass
def _lowercase ( self : Tuple , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : str , ) -> Tuple:
if os.path.isfile(UpperCAmelCase__ ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
# saving model weights/files
self._save_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
@classmethod
def _lowercase ( cls : Any , UpperCAmelCase__ : Union[str, Path] , UpperCAmelCase__ : Optional[Union[bool, str, None]] = None , UpperCAmelCase__ : Optional[Union[str, None]] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional["ort.SessionOptions"] = None , **UpperCAmelCase__ : Optional[Any] , ) -> Tuple:
_a : Dict = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(UpperCAmelCase__ ):
_a : Union[str, Any] = OnnxRuntimeModel.load_model(
os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , provider=UpperCAmelCase__ , sess_options=UpperCAmelCase__ )
_a : Optional[int] = Path(UpperCAmelCase__ )
# load model from hub
else:
# download model
_a : Union[str, Any] = hf_hub_download(
repo_id=UpperCAmelCase__ , filename=UpperCAmelCase__ , use_auth_token=UpperCAmelCase__ , revision=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , force_download=UpperCAmelCase__ , )
_a : Optional[int] = Path(UpperCAmelCase__ ).parent
_a : Union[str, Any] = Path(UpperCAmelCase__ ).name
_a : int = OnnxRuntimeModel.load_model(UpperCAmelCase__ , provider=UpperCAmelCase__ , sess_options=UpperCAmelCase__ )
return cls(model=UpperCAmelCase__ , **UpperCAmelCase__ )
@classmethod
def _lowercase ( cls : Tuple , UpperCAmelCase__ : Union[str, Path] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , **UpperCAmelCase__ : Union[str, Any] , ) -> str:
_a : str = None
if len(str(UpperCAmelCase__ ).split("""@""" ) ) == 2:
_a , _a : List[str] = model_id.split("""@""" )
return cls._from_pretrained(
model_id=UpperCAmelCase__ , revision=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , force_download=UpperCAmelCase__ , use_auth_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 324
|
"""simple docstring"""
from math import factorial
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
_a : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_a : Optional[int] = float(factorial(UpperCamelCase__ ) )
coefficient /= factorial(UpperCamelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
| 324
| 1
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__lowerCamelCase : Dict = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : tuple , lowerCAmelCase : Path , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : List[str]=False , ):
"""simple docstring"""
output_path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , enable_onnx_checker=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , )
else:
export(
lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , )
@torch.no_grad()
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : bool = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ : int = "cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
else:
SCREAMING_SNAKE_CASE_ : List[str] = "cpu"
SCREAMING_SNAKE_CASE_ : Optional[int] = Path(lowerCAmelCase__ )
# VAE DECODER
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoencoderKL.from_pretrained(model_path + "/vae" )
SCREAMING_SNAKE_CASE_ : List[str] = vae_decoder.config.latent_channels
# forward only through the decoder part
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vae_decoder.decode
onnx_export(
lowerCAmelCase__ , model_args=(
torch.randn(1 , lowerCAmelCase__ , 2_5 , 2_5 ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=lowerCAmelCase__ , )
del vae_decoder
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
__lowerCamelCase : Dict = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
| 18
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] ={
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class _A ( lowerCAmelCase ):
snake_case__ : Union[str, Any] = 'open-llama'
def __init__( self , __lowerCAmelCase=10_0000 , __lowerCAmelCase=4096 , __lowerCAmelCase=1_1008 , __lowerCAmelCase=32 , __lowerCAmelCase=32 , __lowerCAmelCase="silu" , __lowerCAmelCase=2048 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-6 , __lowerCAmelCase=True , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = hidden_size
lowercase = intermediate_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = initializer_range
lowercase = rms_norm_eps
lowercase = use_cache
lowercase = kwargs.pop(
"""use_memorry_efficient_attention""" , __lowerCAmelCase )
lowercase = hidden_dropout_prob
lowercase = attention_dropout_prob
lowercase = use_stable_embedding
lowercase = shared_input_output_embedding
lowercase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , tie_word_embeddings=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowerCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'got {self.rope_scaling}' )
lowercase = self.rope_scaling.get("""type""" , __lowerCAmelCase )
lowercase = self.rope_scaling.get("""factor""" , __lowerCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
| 197
| 0
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def snake_case ( snake_case__ :Dict) -> int:
if not is_accelerate_available():
return method
_A = version.parse(accelerate.__version__).base_version
if version.parse(a__) < version.parse("""0.17.0"""):
return method
def wrapper(self :Optional[Any] , *snake_case__ :Optional[Any] , **snake_case__ :List[str]):
if hasattr(self , """_hf_hook""") and hasattr(self._hf_hook , """pre_forward"""):
self._hf_hook.pre_forward(self)
return method(self , *a__ , **a__)
return wrapper
| 370
|
from collections.abc import Sequence
def snake_case ( snake_case__ :Sequence[float] , snake_case__ :bool = False) -> float:
if not arr:
return 0
_A = 0 if allow_empty_subarrays else float("""-inf""")
_A = 0.0
for num in arr:
_A = max(0 if allow_empty_subarrays else num , curr_sum + num)
_A = max(snake_case__ , snake_case__)
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
_SCREAMING_SNAKE_CASE = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
| 81
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_snake_case )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
UpperCAmelCase_ : List[str] = Features({"""text""": Value("""string""" )} )
UpperCAmelCase_ : Tuple = Features({"""summary""": Value("""string""" )} )
UpperCAmelCase_ : Union[str, Any] = """text"""
UpperCAmelCase_ : Dict = """summary"""
@property
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
return {self.text_column: "text", self.summary_column: "summary"}
| 151
|
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class lowerCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = list(model.children() )[:-2]
__UpperCAmelCase : Tuple = nn.Sequential(*UpperCamelCase )
__UpperCAmelCase : Tuple = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[str] ):
'''simple docstring'''
__UpperCAmelCase : int = self.pool(self.model(UpperCamelCase ) )
__UpperCAmelCase : str = torch.flatten(UpperCamelCase , start_dim=2 )
__UpperCAmelCase : Union[str, Any] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : str , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )]
__UpperCAmelCase : Optional[Any] = os.path.dirname(UpperCamelCase )
__UpperCAmelCase : List[Any] = tokenizer
__UpperCAmelCase : List[str] = labels
__UpperCAmelCase : Optional[int] = len(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = max_seq_length
__UpperCAmelCase : Tuple = transforms
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : int , UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) )
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1]
__UpperCAmelCase : Union[str, Any] = sentence[: self.max_seq_length]
__UpperCAmelCase : Tuple = torch.zeros(self.n_classes )
__UpperCAmelCase : str = 1
__UpperCAmelCase : List[str] = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
__UpperCAmelCase : Any = self.transforms(UpperCamelCase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : int = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = [len(row["""sentence"""] ) for row in batch]
__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase )
__UpperCAmelCase : int = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long )
__UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ):
__UpperCAmelCase : List[str] = input_row["""sentence"""]
__UpperCAmelCase : Optional[Any] = 1
__UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] )
__UpperCAmelCase : Optional[int] = torch.stack([row["""label"""] for row in batch] )
__UpperCAmelCase : int = torch.stack([row["""image_start_token"""] for row in batch] )
__UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase ( ) -> List[str]:
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ) -> Dict:
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 115
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Union[str, Any] = 'encoder-decoder'
A_ : Optional[int] = True
def __init__(self : List[str] , **a__ : Any ):
"""simple docstring"""
super().__init__(**a__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
__snake_case = kwargs.pop('''encoder''' )
__snake_case = encoder_config.pop('''model_type''' )
__snake_case = kwargs.pop('''decoder''' )
__snake_case = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
__snake_case = AutoConfig.for_model(a__ , **a__ )
__snake_case = AutoConfig.for_model(a__ , **a__ )
__snake_case = True
@classmethod
def a (cls : Optional[Any] , a__ : PretrainedConfig , a__ : PretrainedConfig , **a__ : Optional[Any] ):
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
__snake_case = True
__snake_case = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **a__ )
def a (self : str ):
"""simple docstring"""
__snake_case = copy.deepcopy(self.__dict__ )
__snake_case = self.encoder.to_dict()
__snake_case = self.decoder.to_dict()
__snake_case = self.__class__.model_type
return output
| 351
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
snake_case_ = data_utils.TransfoXLTokenizer
snake_case_ = data_utils.TransfoXLCorpus
snake_case_ = data_utils
snake_case_ = data_utils
def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : int ) -> Dict:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case_ , '''rb''' ) as fp:
__snake_case = pickle.load(snake_case_ , encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__snake_case = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
__snake_case = corpus.vocab.__dict__
torch.save(snake_case_ , snake_case_ )
__snake_case = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , snake_case_ )
__snake_case = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(snake_case_ , snake_case_ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__snake_case = os.path.abspath(snake_case_ )
__snake_case = os.path.abspath(snake_case_ )
print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__snake_case = TransfoXLConfig()
else:
__snake_case = TransfoXLConfig.from_json_file(snake_case_ )
print(f"""Building PyTorch model from configuration: {config}""" )
__snake_case = TransfoXLLMHeadModel(snake_case_ )
__snake_case = load_tf_weights_in_transfo_xl(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
__snake_case = os.path.join(snake_case_ , snake_case_ )
__snake_case = os.path.join(snake_case_ , snake_case_ )
print(f"""Save PyTorch model to {os.path.abspath(snake_case_ )}""" )
torch.save(model.state_dict() , snake_case_ )
print(f"""Save configuration file to {os.path.abspath(snake_case_ )}""" )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
snake_case_ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 238
| 0
|
"""simple docstring"""
from __future__ import annotations
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->None:
"""simple docstring"""
a_ = len(__lowercase )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(__lowercase ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , __lowercase , __lowercase , )
def UpperCamelCase ( UpperCAmelCase ) ->None:
"""simple docstring"""
a_ = []
depth_first_search([] , [] , [] , __lowercase , __lowercase )
# Print all the boards
for board in boards:
for column in board:
print(__lowercase )
print("" )
print(len(__lowercase ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 243
|
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def a_ ( __lowercase : Sequence[float] , __lowercase : int , __lowercase : int ) -> tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
_snake_case = (low + high) // 2
_snake_case , _snake_case , _snake_case = max_subarray(__lowercase , __lowercase , __lowercase )
_snake_case , _snake_case , _snake_case = max_subarray(__lowercase , mid + 1 , __lowercase )
_snake_case , _snake_case , _snake_case = max_cross_sum(__lowercase , __lowercase , __lowercase , __lowercase )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def a_ ( __lowercase : Sequence[float] , __lowercase : int , __lowercase : int , __lowercase : int ) -> tuple[int, int, float]:
_snake_case , _snake_case = float('-inf' ), -1
_snake_case , _snake_case = float('-inf' ), -1
_snake_case = 0
for i in range(__lowercase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_snake_case = summ
_snake_case = i
_snake_case = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_snake_case = summ
_snake_case = i
return max_left, max_right, (left_sum + right_sum)
def a_ ( __lowercase : int ) -> float:
_snake_case = [randint(1 , __lowercase ) for _ in range(__lowercase )]
_snake_case = time.time()
max_subarray(__lowercase , 0 , input_size - 1 )
_snake_case = time.time()
return end - start
def a_ ( ) -> None:
_snake_case = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
_snake_case = [time_max_subarray(__lowercase ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(__lowercase , __lowercase ):
print(__lowercase , '\t\t' , __lowercase )
plt.plot(__lowercase , __lowercase )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 282
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = (3_2, 3_2)
_SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
return model
@property
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(__lowerCamelCase )
@property
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
def extract(*__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[str] ):
class lowercase_ :
"""simple docstring"""
def __init__( self : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.ones([0] )
def lowerCAmelCase_ ( self : int , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(__lowerCamelCase )
return self
return Out()
return extract
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.dummy_cond_unet
_SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.dummy_vae
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_SCREAMING_SNAKE_CASE = 7_7
_SCREAMING_SNAKE_CASE = self.dummy_image.to(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
_SCREAMING_SNAKE_CASE = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = "A painting of a squirrel eating a burger"
_SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__lowerCamelCase , return_dict=__lowerCamelCase , )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_SCREAMING_SNAKE_CASE = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.dummy_cond_unet
_SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.dummy_vae
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_SCREAMING_SNAKE_CASE = 7_7
_SCREAMING_SNAKE_CASE = self.dummy_image.to(__lowerCamelCase )
# put models in fp16
_SCREAMING_SNAKE_CASE = unet.half()
_SCREAMING_SNAKE_CASE = vae.half()
_SCREAMING_SNAKE_CASE = bert.half()
# make sure here that pndm scheduler skips prk
_SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
_SCREAMING_SNAKE_CASE = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = "A painting of a squirrel eating a burger"
_SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=__lowerCamelCase , num_inference_steps=2 , output_type="np" , image=__lowerCamelCase , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
_SCREAMING_SNAKE_CASE = init_image.resize((7_6_0, 5_0_4) )
_SCREAMING_SNAKE_CASE = "BAAI/AltDiffusion"
_SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCamelCase , safety_checker=__lowerCamelCase , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE = "A fantasy landscape, trending on artstation"
_SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.7_5 , guidance_scale=7.5 , generator=__lowerCamelCase , output_type="np" , )
_SCREAMING_SNAKE_CASE = output.images[0]
_SCREAMING_SNAKE_CASE = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
_SCREAMING_SNAKE_CASE = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
_SCREAMING_SNAKE_CASE = init_image.resize((7_6_8, 5_1_2) )
_SCREAMING_SNAKE_CASE = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
_SCREAMING_SNAKE_CASE = "BAAI/AltDiffusion"
_SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCamelCase , safety_checker=__lowerCamelCase , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE = "A fantasy landscape, trending on artstation"
_SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.7_5 , guidance_scale=7.5 , generator=__lowerCamelCase , output_type="np" , )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 111
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def SCREAMING_SNAKE_CASE_ ( __A : Dict ) -> Dict:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__A )
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] ) -> str:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_SCREAMING_SNAKE_CASE = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(__A , id=__A )
| 111
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.