| code stringlengths 87–55.2k | code_codestyle int64 0–349 | style_context stringlengths 135–49.1k | style_context_codestyle int64 0–349 | label int64 0–1 |
|---|---|---|---|---|
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
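
For reference, a minimal sketch of calling the checker programmatically rather than through the CLI; the runner names and the environment variable here are hypothetical examples, not part of the script:

```python
# Hypothetical invocation; runner names and the env var are examples only.
import os

get_runner_status(["ci-runner-1", "ci-runner-2"], os.environ["GITHUB_TOKEN"])
```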
| 80 |
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
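
A usage sketch; the rendered list depends on the live HackerNews API, so the output shown is illustrative only:

```python
# Illustrative only: titles/URLs come from the live API at call time.
print(hackernews_top_stories_as_markdown(max_stories=3))
# * [Example story title](https://example.com/story)
```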
| 85 | 0 |
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
lowerCamelCase_ : Union[str, Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
lowerCamelCase_ : Dict = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'{len(upper_files)} files contain uppercase characters:')
print("""\n""".join(upper_files) + """\n""")
lowerCamelCase_ : Any = [file for file in filepaths if """ """ in file]
if space_files:
print(F'{len(space_files)} files contain space characters:')
print("""\n""".join(space_files) + """\n""")
lowerCamelCase_ : Dict = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F'{len(hyphen_files)} files contain hyphen characters:')
print("""\n""".join(hyphen_files) + """\n""")
lowerCamelCase_ : Union[str, Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'{len(nodir_files)} files are not in a directory:')
print("""\n""".join(nodir_files) + """\n""")
lowerCamelCase_ : Optional[int] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files) | 81 |
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work when the target length exceeds the model's maximum length
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
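
For context, a minimal sketch of the pipeline call these tests exercise (the tiny model name is taken from the tests above; its output is gibberish by design):

```python
from transformers import pipeline

generator = pipeline("text-generation", model="sshleifer/tiny-ctrl")
result = generator("This is a test", do_sample=False)  # greedy, deterministic decoding
print(result[0]["generated_text"])
```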
| 85 | 0 |
import argparse
import re

import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel

KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # NOTE: `"audio" and "qkv" in key` evaluates as `"qkv" in key`, because the string
        # literal "audio" is always truthy; every qkv weight therefore gets split here.
        if "audio" and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 82 |
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 85 | 0 |
import random
import unittest

from torch.utils.data import BatchSampler, DataLoader, IterableDataset

from accelerate import Accelerator
from accelerate.data_loader import (
    BatchSamplerShard,
    DataLoaderDispatcher,
    DataLoaderShard,
    IterableDatasetShard,
    SkipBatchSampler,
    SkipDataLoader,
    skip_first_batches,
)


class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)

    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)

    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
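
A standalone sketch of the sharding behavior the first tests assert, runnable without multiple processes (arguments mirror the tests above):

```python
from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
print(list(shards[0]))  # batches for process 0
print(list(shards[1]))  # batches for process 1, padded by wrapping around
```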
| 83 |
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
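
A minimal construction sketch using the classes above (default values throughout; nothing here is loaded from the Hub):

```python
text_config = BridgeTowerTextConfig()
vision_config = BridgeTowerVisionConfig()
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
print(config.model_type)  # "bridgetower"
```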
| 85 | 0 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__UpperCAmelCase = 2_99_79_24_58
# Symbols
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = symbols('ct x y z')
def _snake_case ( lowercase__ : float ) -> float:
'''simple docstring'''
if velocity > c:
raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("""Speed must be greater than or equal to 1!""" )
return velocity / c
def _snake_case ( lowercase__ : float ) -> float:
'''simple docstring'''
return 1 / sqrt(1 - beta(lowercase__ ) ** 2 )
def _snake_case ( lowercase__ : float ) -> np.ndarray:
'''simple docstring'''
return np.array(
[
[gamma(lowercase__ ), -gamma(lowercase__ ) * beta(lowercase__ ), 0, 0],
[-gamma(lowercase__ ) * beta(lowercase__ ), gamma(lowercase__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def _snake_case ( lowercase__ : float , lowercase__ : np.ndarray | None = None ) -> np.ndarray:
'''simple docstring'''
if event is None:
lowerCAmelCase_ :Tuple = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(lowercase__ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__UpperCAmelCase = transform(29_97_92_45)
print('Example of four vector: ')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__UpperCAmelCase = {ct: c, x: 1, y: 1, z: 1}
__UpperCAmelCase = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 84 |
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    """Return True if every element of nums appears exactly once."""
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 85 | 0 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
__lowerCAmelCase : Optional[Any] = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__lowerCAmelCase : str = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
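
A quick worked example (loan figures invented for illustration): borrowing 25,000 at 12% per annum over 2 years gives a monthly rate of 0.01 and 24 payments:

```python
emi = equated_monthly_installments(principal=25_000, rate_per_annum=0.12, years_to_repay=2)
print(round(emi, 2))  # ~1176.84
```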
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the result"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
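
A hypothetical usage sketch (the prompt and choices are examples; `run()` blocks for keyboard input in a real terminal session):

```python
menu = BulletMenu("Which framework do you want to use?", ["pytorch", "tensorflow", "jax"])
selected_index = menu.run(default_choice=0)
print(f"Selected option {selected_index}")
```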
| 85 | 0 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}


def next_term(a_i, k, i, n):
    # a_i -> b * 10^k + c; ds_b is digitsum(b), c is the low part
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the n-th term of the sequence a(n+1) = a(n) + digitsum(a(n)), with a(1) = 1."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
| 87 |
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 85 | 0 |
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True if the string is a valid IPv4 address with each octet in [0, 254]."""
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
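
A couple of illustrative checks (note the upper bound per octet is 254, as written above):

```python
assert is_ip_va_address_valid("192.168.0.23") is True
assert is_ip_va_address_valid("192.168.256.1") is False  # 256 is out of range
assert is_ip_va_address_valid("not.an.ip") is False
```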
| 88 |
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Convert an integer to a minimal-length Roman numeral string."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return the number of characters saved by rewriting the file's numerals in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
| 85 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
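# Label counts per GLUE task; "sts-b" is a regression task, hence a single output.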
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
) -> None:
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path)
    print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
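
# Example invocation (paths are illustrative, not from this source):
#     python convert_xlnet_checkpoint.py --tf_checkpoint_path ./xlnet.ckpt \
#         --xlnet_config_file ./config.json --pytorch_dump_folder_path ./out --finetuning_task sts-b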
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
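
# Map of submodule -> public symbols; _LazyModule (bottom of file) consumes it so the
# heavy torch/TF/Flax backends are only imported on first attribute access.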
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
from collections.abc import Sequence
def evaluate_poly( poly: Sequence[float] , x: float ) -> float:
    """simple docstring"""
    # Direct evaluation: sum of c_i * x**i over the coefficients.
    return sum(c * (x**i) for i, c in enumerate(poly ) )


def horner( poly: Sequence[float] , x: float ) -> float:
    """simple docstring"""
    # Horner's scheme: fold from the highest coefficient, one multiply-add per term.
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
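
# Both functions agree; for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0 (the values
# used below), the result is 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.0.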
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 90 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )

        # Rebuild the backend pre-tokenizer only when its stored add_prefix_space
        # setting disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )

        self.add_prefix_space = add_prefix_space

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )

        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
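
# Sketch of intended use (model id taken from the pretrained map above):
#     tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     tok("Hello world")["input_ids"]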
| 85 | 0 |
"""simple docstring"""
def nor_gate(input_1 , input_2 ) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0 )
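
# NOR is the negation of OR: the gate outputs 1 only when both inputs are 0;
# the truth table printed below enumerates all four cases.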
def main() -> None:
    """simple docstring"""
    print('''Truth Table of NOR Gate:''' )
    print('''| Input 1 | Input 2 | Output |''' )
    print(f'| 0 | 0 | {nor_gate(0 , 0 )} |' )
    print(f'| 0 | 1 | {nor_gate(0 , 1 )} |' )
    print(f'| 1 | 0 | {nor_gate(1 , 0 )} |' )
    print(f'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 91 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ):
    '''simple docstring'''
    # Two locks on the same file: the second acquire must time out while the first is held.
    locka = FileLock(str(tmpdir / "foo.lock" ) )
    lockb = FileLock(str(tmpdir / "foo.lock" ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout


def test_long_path( tmpdir ):
    '''simple docstring'''
    # Over-long lock filenames are hashed/truncated so the OS path limit is respected.
    filename = "a" * 1000 + ".lock"
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(".lock" )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
| 85 | 0 |
from math import factorial
def solution(n: int = 20 ):
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
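
# solution(20) == C(40, 20) == 137846528820: the number of monotone lattice paths
# through a 20x20 grid (Project Euler problem 15).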
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 92 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples( df: "pyspark.sql.DataFrame" , partition_order: List[int] , ):
    '''simple docstring'''
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*" ).where(f'part_id = {partition_id}' ).drop("part_id" )
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'{partition_id}_{row_id}', row.asDict()
                row_id += 1

    return generate_fn
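
# Example keys above have the form "{partition_id}_{row_id}": unique and deterministic
# for a fixed partition_order, which is exactly what shuffling/sharding below permutes.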
class SparkExamplesIterable( _BaseExamplesIterable ):
    def __init__( self , df , partition_order=None , ) -> None:
        '''simple docstring'''
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )

    def __iter__( self ):
        '''simple docstring'''
        yield from self.generate_examples_fn()

    def shuffle_data_sources( self , generator ) -> "SparkExamplesIterable":
        '''simple docstring'''
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )

    def shard_data_sources( self , worker_id , num_workers ) -> "SparkExamplesIterable":
        '''simple docstring'''
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )

    @property
    def n_shards( self ) -> int:
        '''simple docstring'''
        return len(self.partition_order )
class Spark( datasets.DatasetBuilder ):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__( self , df , cache_dir = None , working_dir = None , **config_kwargs , ) -> None:
        '''simple docstring'''
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **config_kwargs , )
    def _validate_cache_dir( self ) -> None:
        '''simple docstring'''

        def create_cache_and_write_probe(context ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , "fs_test" + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , "a" )
            return [probe_file]

        if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
    def _info( self ) -> datasets.DatasetInfo:
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators( self , dl_manager ):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size ) -> None:
        '''simple docstring'''
        import pyspark

        def get_arrow_batch_size(it ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , "batch_bytes: long" )
            .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single( self , fpath , file_format , max_shard_size , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        '''simple docstring'''
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(file , dest )

        stats = (
            self.df.mapInArrow(write_arrow , "task_id: long, num_examples: long, num_bytes: long" )
            .groupBy("task_id" )
            .agg(
                pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split( self , split_generator , file_format = "arrow" , max_shard_size = None , num_proc = None , **kwargs , ) -> None:
        '''simple docstring'''
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
        fpath = path_join(self._output_dir , fname )

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(F'Renaming {total_shards} shards.' )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id , shard_id , global_shard_id , ):
                rename(
                    fs , fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace("TTTTT-SSSSS" , F'{global_shard_id:05d}' ).replace("NNNNN" , F'{total_shards:05d}' ) , )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace(SUFFIX , "" ) , )
    def _get_examples_iterable_for_split( self , split_generator , ) -> SparkExamplesIterable:
        '''simple docstring'''
        return SparkExamplesIterable(self.df )
| 85 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests( unittest.TestCase ):
    @property
    def gpu_provider( self ):
        """simple docstring"""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options( self ):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpaint_default_pndm( self ):
        """simple docstring"""
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )

        prompt = '''A red cat sitting on a park bench'''

        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def test_inpaint_k_lms( self ):
        """simple docstring"""
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )

        prompt = '''A red cat sitting on a park bench'''

        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 93 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path: str , config_file: str , pytorch_dump_path: str ):
    """simple docstring"""
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = LxmertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
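# Example invocation (paths are illustrative, not from this source):
#     python convert_lxmert_checkpoint.py --tf_checkpoint_path ./model.ckpt \
#         --config_file ./config.json --pytorch_dump_path ./pytorch_model.bin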
| 94 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests( unittest.TestCase ):
    def tearDown( self ) -> None:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed( self ):
        '''simple docstring'''
        return 12

    @property
    def num_embeds_ada_norm( self ):
        '''simple docstring'''
        return 12

    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32

    @property
    def dummy_vqvae( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model

    @property
    def dummy_tokenizer( self ):
        '''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer

    @property
    def dummy_text_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )

    @property
    def dummy_transformer( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs )
        return model
    def test_vq_diffusion( self ):
        '''simple docstring'''
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="np" )
        image = output.images

        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="np" , return_dict=False , num_inference_steps=2 )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling( self ):
        '''simple docstring'''
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="np" )
        image = output.images

        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="np" , return_dict=False , num_inference_steps=2 )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ) -> None:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=generator , output_type="np" , )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 85 | 0 |
from __future__ import annotations
def prime_factors( n: int ):
    """simple docstring"""
    # Trial division: divide out each factor as long as it goes, so only primes are appended.
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
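
# Example (value chosen here): prime_factors(360) -> [2, 2, 2, 3, 3, 5].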
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95 |
'''simple docstring'''
from statistics import mean, stdev
def normalization( data: list , ndigits: int = 3 ):
    '''simple docstring'''
    # min-max scaling to [0, 1]
    x_min = min(data )
    x_max = max(data )
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]


def standardization( data: list , ndigits: int = 3 ):
    '''simple docstring'''
    # z-score: zero mean, unit standard deviation
    mu = mean(data )
    sigma = stdev(data )
    return [round((x - mu) / sigma , ndigits ) for x in data]
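
# Illustrative values (chosen here):
#     normalization([2, 4, 6])   -> [0.0, 0.5, 1.0]
#     standardization([2, 4, 6]) -> [-1.0, 0.0, 1.0]   (sample stdev of [2, 4, 6] is 2)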
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector , vector_2: Vector ) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )


def euclidean_distance_no_np(vector_1: Vector , vector_2: Vector ) -> VectorOut:
    return sum((va - vb) ** 2 for va, vb in zip(vector_1 , vector_2 ) ) ** (1 / 2)
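
# Quick check (values chosen here): euclidean_distance([0, 0], [3, 4]) == 5.0.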
if __name__ == "__main__":
    def benchmark() -> None:
        from timeit import timeit

        print('Without Numpy' )
        print(
            timeit(
                'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
        print('With Numpy' )
        print(
            timeit(
                'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )

    benchmark()
| 96 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 85 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests( unittest.TestCase ):
    """simple docstring"""

    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image( self ):
        '''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image

    @property
    def dummy_cond_unet_upscale( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model

    @property
    def dummy_vae( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        return model

    @property
    def dummy_text_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
        return CLIPTextModel(config )
    def test_stable_diffusion_upscale( self ):
        '''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='''v_prediction''' )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )

        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=350 , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )

        image = output.images

        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=False , )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def test_stable_diffusion_upscale_batch( self ):
        '''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='''v_prediction''' )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )

        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=350 , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )

        prompt = '''A painting of a squirrel eating a burger'''
        output = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_upscale_fp16( self ):
        '''simple docstring'''
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='''v_prediction''' )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )

        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=350 , )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        image = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , num_inference_steps=2 , output_type='''np''' , ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests( unittest.TestCase ):
    """simple docstring"""

    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline( self ):
        '''simple docstring'''
        image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
            '''/upsampled_cat.npy''' )

        model_id = '''stabilityai/stable-diffusion-x4-upscaler'''
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()

        prompt = '''a cat sitting on a park bench'''

        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type='''np''' , )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16( self ):
        '''simple docstring'''
        image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
            '''/upsampled_cat_fp16.npy''' )

        model_id = '''stabilityai/stable-diffusion-x4-upscaler'''
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()

        prompt = '''a cat sitting on a park bench'''

        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type='''np''' , )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )

        model_id = '''stabilityai/stable-diffusion-x4-upscaler'''
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()

        prompt = '''a cat sitting on a park bench'''

        generator = torch.manual_seed(0 )
        _ = pipe(
            prompt=prompt , image=image , generator=generator , num_inference_steps=5 , output_type='''np''' , )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 97 |
'''simple docstring'''
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[int] ):
'''simple docstring'''
snake_case_ = [0 for i in range(r + 1 )]
# nc0 = 1
snake_case_ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
snake_case_ = min(snake_case , snake_case )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 85 | 0 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ""
snake_case__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[DatasetInfo] = None ,lowerCamelCase__ : Optional[str] = None ,**lowerCamelCase__ : Optional[int] ,):
super().__init__(self ,**lowerCamelCase__ )
UpperCAmelCase__ = repo_info
UpperCAmelCase__ = token
UpperCAmelCase__ = None
def __lowerCAmelCase ( self : int ):
if self.dir_cache is None:
UpperCAmelCase__ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(lowerCamelCase__ ): {'name': str(lowerCamelCase__ ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : str ,lowerCamelCase__ : str = "rb" ,**lowerCamelCase__ : str ,):
if not isinstance(self.repo_info ,lowerCamelCase__ ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
UpperCAmelCase__ = hf_hub_url(self.repo_info.id ,lowerCamelCase__ ,revision=self.repo_info.sha )
return fsspec.open(
lowerCamelCase__ ,mode=lowerCamelCase__ ,headers=get_authentication_headers_for_url(lowerCamelCase__ ,use_auth_token=self.token ) ,client_kwargs={'trust_env': True} ,).open()
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : Any ,**lowerCamelCase__ : Union[str, Any] ):
self._get_dirs()
UpperCAmelCase__ = self._strip_protocol(lowerCamelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int]=False ,**lowerCamelCase__ : List[str] ):
self._get_dirs()
UpperCAmelCase__ = PurePosixPath(path.strip('/' ) )
UpperCAmelCase__ = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ = PurePosixPath(p.strip('/' ) )
UpperCAmelCase__ = p.parent
if root == path:
UpperCAmelCase__ = f
UpperCAmelCase__ = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 98 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Tuple = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
_SCREAMING_SNAKE_CASE : List[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
def A_ ( A__ , A__ ) -> float:
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(A__ ) * abs(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 99 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
_SCREAMING_SNAKE_CASE : int = parser.parse_args()
_SCREAMING_SNAKE_CASE : List[Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
_SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPImageProcessor()
_SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
_SCREAMING_SNAKE_CASE : int = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__magic_name__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = val
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__SCREAMING_SNAKE_CASE = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
__SCREAMING_SNAKE_CASE = value
else:
__SCREAMING_SNAKE_CASE = value
return new_state_dict
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = """"""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
__SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__SCREAMING_SNAKE_CASE = in_proj_weight[:256, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[:256]
__SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[256:512]
__SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
__SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__SCREAMING_SNAKE_CASE = in_proj_weight[:256, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[:256]
__SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[256:512]
__SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__SCREAMING_SNAKE_CASE = state_dict.pop(
f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
__SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[:256, :]
__SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[:256]
__SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[256:512, :]
__SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[256:512]
__SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[-256:, :]
__SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[-256:]
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = image.size
__SCREAMING_SNAKE_CASE = max(UpperCamelCase_ , UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = 800 if """detection""" in checkpoint_url else 1000
__SCREAMING_SNAKE_CASE = target_max_size / current_max_size
__SCREAMING_SNAKE_CASE = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = F.to_tensor(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = F.normalize(UpperCamelCase_ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
logger.info("""Converting model...""" )
# load original state dict
__SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="""cpu""" )
# rename keys
for src, dest in rename_keys:
rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = rename_backbone_keys(UpperCamelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__SCREAMING_SNAKE_CASE = """model."""
for key in state_dict.copy().keys():
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
__SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = val
# create HuggingFace model and load state dict
__SCREAMING_SNAKE_CASE = TableTransformerConfig(
backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
__SCREAMING_SNAKE_CASE = 15
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = {0: """table""", 1: """table rotated"""}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
else:
__SCREAMING_SNAKE_CASE = 125
__SCREAMING_SNAKE_CASE = 6
__SCREAMING_SNAKE_CASE = {
0: """table""",
1: """table column""",
2: """table row""",
3: """table column header""",
4: """table projected row header""",
5: """table spanning cell""",
}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = DetrImageProcessor(
format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 )
__SCREAMING_SNAKE_CASE = TableTransformerForObjectDetection(UpperCamelCase_ )
model.load_state_dict(UpperCamelCase_ )
model.eval()
# verify our conversion
__SCREAMING_SNAKE_CASE = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png"""
__SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , filename=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = Image.open(UpperCamelCase_ ).convert("""RGB""" )
__SCREAMING_SNAKE_CASE = normalize(resize(UpperCamelCase_ , UpperCamelCase_ ) ).unsqueeze(0 )
__SCREAMING_SNAKE_CASE = model(UpperCamelCase_ )
if "detection" in checkpoint_url:
__SCREAMING_SNAKE_CASE = (1, 15, 3)
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
__SCREAMING_SNAKE_CASE = (1, 125, 7)
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase_ , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
image_processor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
# Push model to HF hub
logger.info("""Pushing model to the hub...""" )
__SCREAMING_SNAKE_CASE = (
"""microsoft/table-transformer-detection"""
if """detection""" in checkpoint_url
else """microsoft/table-transformer-structure-recognition"""
)
model.push_to_hub(UpperCamelCase_ )
image_processor.push_to_hub(UpperCamelCase_ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__magic_name__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 100 |
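One step above deserves a closer look: torch.nn.MultiheadAttention keeps the query/key/value projections fused in a single in_proj tensor of shape (3*d, d), while the converted model stores three separate linears, so read_in_q_k_v slices the fused weights into thirds (d = 256 for this model). The slicing in isolation, under that assumption:

import torch

d = 256  # hidden size assumed by the conversion above
in_proj_weight = torch.randn(3 * d, d)  # fused [q; k; v] projection, as stored by nn.MultiheadAttention
in_proj_bias = torch.randn(3 * d)

q_w, k_w, v_w = in_proj_weight[:d], in_proj_weight[d : 2 * d], in_proj_weight[-d:]
q_b, k_b, v_b = in_proj_bias[:d], in_proj_bias[d : 2 * d], in_proj_bias[-d:]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)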
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Any = "upernet"
def __init__( self , a__=None , a__=512 , a__=0.0_2 , a__=[1, 2, 3, 6] , a__=True , a__=0.4 , a__=384 , a__=256 , a__=1 , a__=False , a__=255 , **a__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**a__ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
snake_case_ = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a__ , a__ ):
snake_case_ = backbone_config.get("model_type" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(a__ )
snake_case_ = backbone_config
snake_case_ = hidden_size
snake_case_ = initializer_range
snake_case_ = pool_scales
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_in_channels
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = loss_ignore_index
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 85 | 0 |
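The config above nests a backbone config: left unset it defaults to a four-stage ResNet, a dict is revived through CONFIG_MAPPING via its model_type, and to_dict() deep-copies so the nested config serializes as a plain dict. A hedged usage sketch, assuming a transformers version that ships UperNetConfig:

from transformers import UperNetConfig

cfg = UperNetConfig()  # backbone defaults to a ResNet exposing stage1..stage4
print(cfg.backbone_config.model_type)  # "resnet"
blob = cfg.to_dict()
assert blob["backbone_config"]["model_type"] == "resnet"  # nested config round-trips as a dict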
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowercase__ :Optional[Any] = parse(importlib.metadata.version("torch"))
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
lowercase = STR_OPERATION_TO_FUNC[operation]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = parse(importlib.metadata.version(lowerCAmelCase__ ) )
return operation(lowerCAmelCase__ , parse(lowerCAmelCase__ ) )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
return compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
| 101 |
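These helpers turn an operator string into a packaging comparison, accepting either an installed package name or a raw version string. A self-contained sketch of the same pattern that does not depend on the module's STR_OPERATION_TO_FUNC constant; the PackageNotFoundError fallback is an assumption of this sketch, not the original's behavior:

import importlib.metadata
import operator

from packaging.version import parse

OPS = {"<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt}

def compare(package_or_version: str, op: str, version: str) -> bool:
    try:
        current = parse(importlib.metadata.version(package_or_version))  # treat as an installed package first
    except importlib.metadata.PackageNotFoundError:
        current = parse(package_or_version)  # fall back to a literal version string
    return OPS[op](current, parse(version))

print(compare("packaging", ">=", "20.0"))  # likely True on a recent environment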
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
for char in word:
snake_case_ = ord(snake_case )
if not _is_chinese_char(snake_case ):
return 0
return 1
def UpperCamelCase_( snake_case : List[str] ):
'''simple docstring'''
snake_case_ = set()
for token in tokens:
snake_case_ = len(snake_case ) > 1 and is_chinese(snake_case )
if chinese_word:
word_set.add(snake_case )
snake_case_ = list(snake_case )
return word_list
def UpperCamelCase_( snake_case : List[str] , snake_case : set() ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
snake_case_ = max([len(snake_case ) for w in chinese_word_set] )
snake_case_ = bert_tokens
snake_case_ , snake_case_ = 0, len(snake_case )
while start < end:
snake_case_ = True
if is_chinese(bert_word[start] ):
snake_case_ = min(end - start , snake_case )
for i in range(snake_case , 1 , -1 ):
snake_case_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
snake_case_ = "##" + bert_word[j]
snake_case_ = start + i
snake_case_ = False
break
if single_word:
start += 1
return bert_word
def UpperCamelCase_( snake_case : List[str] , snake_case : LTP , snake_case : BertTokenizer ):
'''simple docstring'''
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["cws"] ).cws
snake_case_ = [get_chinese_word(snake_case ) for r in res]
ltp_res.extend(snake_case )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=snake_case , truncation=snake_case , max_length=5_1_2 )
bert_res.extend(res["input_ids"] )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for input_ids, chinese_word in zip(snake_case , snake_case ):
snake_case_ = []
for id in input_ids:
snake_case_ = bert_tokenizer._convert_id_to_token(snake_case )
input_tokens.append(snake_case )
snake_case_ = add_sub_symbol(snake_case , snake_case )
snake_case_ = []
        # We only save positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(snake_case ):
if token[:2] == "##":
snake_case_ = token[2:]
# save chinese tokens' pos
if len(snake_case ) == 1 and _is_chinese_char(ord(snake_case ) ):
ref_id.append(snake_case )
ref_ids.append(snake_case )
assert len(snake_case ) == len(snake_case )
return ref_ids
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
with open(args.file_name , "r" , encoding="utf-8" ) as f:
snake_case_ = f.readlines()
snake_case_ = [line.strip() for line in data if len(snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
snake_case_ = LTP(args.ltp ) # faster in GPU device
snake_case_ = BertTokenizer.from_pretrained(args.bert )
snake_case_ = prepare_ref(snake_case , snake_case , snake_case )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
snake_case_ = [json.dumps(snake_case ) + "\n" for ref in ref_ids]
f.writelines(snake_case )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
main(args)
| 85 | 0 |
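The heart of the script above is the greedy longest-match in add_sub_symbol: starting at every Chinese character it tries window sizes from the longest LTP-segmented word down to two tokens, and when a window matches it prefixes every token after the first with "##" so whole-word masking can treat them as one unit. A toy sketch of that loop with ASCII stand-ins (the is-Chinese check is dropped for brevity):

def mark_subwords(tokens, words):
    max_len = max(len(w) for w in words)
    out, start = list(tokens), 0
    while start < len(out):
        matched = False
        for i in range(min(len(out) - start, max_len), 1, -1):  # longest window first
            if "".join(out[start : start + i]) in words:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]  # mark non-initial pieces of the whole word
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return out

print(mark_subwords(list("abcd"), {"bcd"}))  # ['a', 'b', '##c', '##d']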
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
self.test()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = 0
__snake_case : Tuple = False
while not completed:
if counter == 1:
self.reset()
__snake_case : Dict = self.advance()
if not self.does_advance(a_ ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
__snake_case , __snake_case , __snake_case : Tuple = self.update(a_ )
counter += 1
if counter > 1_00_00:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def SCREAMING_SNAKE_CASE (self , a_=False ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
super(a_ , self ).__init__()
if not isinstance(a_ , a_ ) or len(a_ ) == 0:
raise ValueError(f"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(a_ , a_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
__snake_case : str = token_ids
__snake_case : Optional[Any] = len(self.token_ids )
__snake_case : Dict = -1 # the index of the currently fulfilled step
__snake_case : List[str] = False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not isinstance(a_ , a_ ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(a_ )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not isinstance(a_ , a_ ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(a_ )}""" )
__snake_case : Optional[Any] = False
__snake_case : Tuple = False
__snake_case : Optional[Any] = False
if self.does_advance(a_ ):
self.fulfilled_idx += 1
__snake_case : Optional[Any] = True
if self.fulfilled_idx == (self.seqlen - 1):
__snake_case : List[str] = True
__snake_case : Tuple = completed
else:
# failed to make progress.
__snake_case : Union[str, Any] = True
self.reset()
return stepped, completed, reset
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = False
__snake_case : Optional[int] = 0
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def SCREAMING_SNAKE_CASE (self , a_=False ):
'''simple docstring'''
__snake_case : Union[str, Any] = PhrasalConstraint(self.token_ids )
if stateful:
__snake_case : List[Any] = self.seqlen
__snake_case : str = self.fulfilled_idx
__snake_case : Union[str, Any] = self.completed
return new_constraint
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=True ):
'''simple docstring'''
__snake_case : List[str] = max([len(a_ ) for one in nested_token_ids] )
__snake_case : str = {}
for token_ids in nested_token_ids:
__snake_case : Optional[Any] = root
for tidx, token_id in enumerate(a_ ):
if token_id not in level:
__snake_case : Optional[Any] = {}
__snake_case : List[Any] = level[token_id]
if no_subsets and self.has_subsets(a_ , a_ ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f""" {nested_token_ids}.""" )
__snake_case : Any = root
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.trie
for current_token in current_seq:
__snake_case : str = start[current_token]
__snake_case : Tuple = list(start.keys() )
return next_tokens
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.next_tokens(a_ )
return len(a_ ) == 0
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = list(root.values() )
if len(a_ ) == 0:
return 1
else:
return sum([self.count_leaves(a_ ) for nn in next_nodes] )
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = self.count_leaves(a_ )
return len(a_ ) != leaf_count
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
super(a_ , self ).__init__()
if not isinstance(a_ , a_ ) or len(a_ ) == 0:
raise ValueError(f"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(a_ , a_ ) for token_ids in nested_token_ids ):
raise ValueError(f"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(a_ , a_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
__snake_case : Union[str, Any] = DisjunctiveTrie(a_ )
__snake_case : int = nested_token_ids
__snake_case : List[Any] = self.trie.max_height
__snake_case : Optional[Any] = []
__snake_case : List[Any] = False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.trie.next_tokens(self.current_seq )
if len(a_ ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not isinstance(a_ , a_ ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_ )}""" )
__snake_case : List[Any] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not isinstance(a_ , a_ ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_ )}""" )
__snake_case : Any = False
__snake_case : int = False
__snake_case : Optional[Any] = False
if self.does_advance(a_ ):
self.current_seq.append(a_ )
__snake_case : List[Any] = True
else:
__snake_case : Optional[Any] = True
self.reset()
__snake_case : Union[str, Any] = self.trie.reached_leaf(self.current_seq )
__snake_case : List[Any] = completed
return stepped, completed, reset
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = False
__snake_case : List[str] = []
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def SCREAMING_SNAKE_CASE (self , a_=False ):
'''simple docstring'''
__snake_case : Optional[int] = DisjunctiveConstraint(self.token_ids )
if stateful:
__snake_case : List[str] = self.seqlen
__snake_case : Optional[Any] = self.current_seq
__snake_case : Optional[int] = self.completed
return new_constraint
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
__snake_case : int = constraints
# max # of steps required to fulfill a given constraint
__snake_case : List[str] = max([c.seqlen for c in constraints] )
__snake_case : List[str] = len(a_ )
__snake_case : Optional[Any] = False
self.init_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = []
__snake_case : List[str] = None
__snake_case : Dict = [constraint.copy(stateful=a_ ) for constraint in self.constraints]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__snake_case : List[Any] = constraint.advance()
if isinstance(a_ , a_ ):
token_list.append(a_ )
elif isinstance(a_ , a_ ):
token_list.extend(a_ )
else:
__snake_case : Dict = self.inprogress_constraint.advance()
if isinstance(a_ , a_ ):
token_list.append(a_ )
elif isinstance(a_ , a_ ):
token_list.extend(a_ )
if len(a_ ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__snake_case , __snake_case : List[str] = self.add(a_ )
                # the entire list of constraints is fulfilled
if self.completed:
break
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not isinstance(a_ , a_ ):
raise ValueError(f"""`token_id` should be an `int`, but is `{token_id}`.""" )
__snake_case , __snake_case : Dict = False, False
if self.completed:
__snake_case : Dict = True
__snake_case : Optional[Any] = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress toward the
            # current job, simply update the state
__snake_case , __snake_case , __snake_case : Tuple = self.inprogress_constraint.update(a_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=a_ ) )
__snake_case : str = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__snake_case : List[str] = None
if len(self.pending_constraints ) == 0:
# we're done!
__snake_case : int = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(a_ ):
__snake_case , __snake_case , __snake_case : Any = pending_constraint.update(a_ )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(a_ )
__snake_case : List[Any] = None
if not complete and stepped:
__snake_case : Tuple = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__snake_case : str = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__snake_case : Optional[Any] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def SCREAMING_SNAKE_CASE (self , a_=True ):
'''simple docstring'''
        __snake_case : List[Any] = ConstraintListState(self.constraints )  # we actually never touch self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
__snake_case : List[str] = [
constraint.copy(stateful=a_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__snake_case : int = self.inprogress_constraint.copy(stateful=a_ )
__snake_case : Optional[Any] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 102 |
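DisjunctiveTrie above is the piece worth internalizing: candidate token sequences become a prefix tree, next_tokens(prefix) lists the continuations that keep some candidate alive, and an empty continuation set means a leaf, i.e. a fulfilled constraint. A minimal standalone sketch of the same structure:

def build_trie(seqs):
    root = {}
    for seq in seqs:
        level = root
        for tok in seq:
            level = level.setdefault(tok, {})  # descend, creating nodes as needed
    return root

def next_tokens(root, prefix):
    level = root
    for tok in prefix:
        level = level[tok]  # raises KeyError if the prefix has left the trie
    return list(level.keys())

trie = build_trie([[1, 2, 3], [1, 4]])
print(next_tokens(trie, [1]))     # [2, 4]
print(next_tokens(trie, [1, 2]))  # [3]
print(next_tokens(trie, [1, 4]))  # [] -> a leaf, so the constraint is complete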
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def UpperCamelCase_( snake_case : Callable ):
'''simple docstring'''
@wraps(snake_case )
def _inner_fn(*snake_case : Optional[int] , **snake_case : List[Any] ):
warnings.warn(
(f'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , snake_case , )
return fn(*snake_case , **snake_case )
return _inner_fn
| 85 | 0 |
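The decorator above warns on every call and then delegates, with functools.wraps preserving the wrapped function's metadata. A cleaned-up sketch under hypothetical names; note the upstream version passes a proper Warning subclass as the category:

import warnings
from functools import wraps

def experimental(fn):  # hypothetical clean name for the decorator above
    @wraps(fn)
    def _inner(*args, **kwargs):
        warnings.warn(f"'{fn.__name__}' is experimental and might change without notice.", UserWarning)
        return fn(*args, **kwargs)
    return _inner

@experimental
def new_api(x: int) -> int:
    return x * 2

assert new_api(3) == 6                 # behavior is unchanged; a UserWarning fires per call
assert new_api.__name__ == "new_api"   # wraps() kept the original metadata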
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''onnx''']
def __init__( self : str , *A_ : Dict , **A_ : Union[str, Any]):
requires_backends(self , ['''onnx'''])
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *A_ : List[str] , **A_ : Optional[Any]):
requires_backends(cls , ['''onnx'''])
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A_ : Dict , **A_ : List[str]):
requires_backends(cls , ['''onnx'''])
| 103 |
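These dummies keep `from transformers import X` working when a backend is absent: the import succeeds, but instantiation or any from_* classmethod raises through requires_backends with an actionable message. The pattern reduced to its essence (class name and message wording are illustrative):

class DummyOnnxModel:  # stand-in for the generated dummy classes
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the following backends: {self._backends}")

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the following backends: {cls._backends}")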
'''simple docstring'''
from __future__ import annotations
import requests
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(snake_case ).json()
def UpperCamelCase_( snake_case : int = 1_0 ):
'''simple docstring'''
snake_case_ = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
snake_case_ = requests.get(snake_case ).json()[:max_stories]
return [get_hackernews_story(snake_case ) for story_id in story_ids]
def UpperCamelCase_( snake_case : int = 1_0 ):
'''simple docstring'''
snake_case_ = hackernews_top_stories(snake_case )
return "\n".join("* [{title}]({url})".format(**snake_case ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 85 | 0 |
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
lowerCAmelCase__ = True
from torch.cuda.amp import autocast
lowerCAmelCase__ = logging.getLogger(__name__)
def _A ( A__=None , A__=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=A__ )
@dataclass
class lowercase_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=lowerCamelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
SCREAMING_SNAKE_CASE : Optional[bool] = field(
default=lowerCamelCase__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1 , metadata={
            'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'
} , )
SCREAMING_SNAKE_CASE : Optional[float] = field(
        default=0.1 , metadata={'help': 'The dropout probability for all 1D convolutional layers in feature extractor.'} , )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.05 , metadata={
'help': (
                'Probability of each feature vector along the time axis to be chosen as the start of the vector '
                'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature '
                'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment`` is True.'
)
} , )
SCREAMING_SNAKE_CASE : Optional[float] = field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class lowercase_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=lowerCamelCase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default='train+validation' , metadata={
            'help': 'The name of the training dataset split to use (via the datasets library). Defaults to \'train+validation\''
} , )
SCREAMING_SNAKE_CASE : bool = field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=lowerCamelCase__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of validation examples to this '
'value if set.'
)
} , )
SCREAMING_SNAKE_CASE : List[str] = list_field(
default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class lowercase_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE : WavaVecaProcessor
SCREAMING_SNAKE_CASE : Union[bool, str] = True
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
def __call__( self : Union[str, Any] ,lowercase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ):
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
__lowercase = [{'''input_values''': feature['''input_values''']} for feature in features]
__lowercase = [{'''input_ids''': feature['''labels''']} for feature in features]
__lowercase = self.processor.pad(
lowercase__ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='''pt''' ,)
__lowercase = self.processor.pad(
labels=lowercase__ ,padding=self.padding ,max_length=self.max_length_labels ,pad_to_multiple_of=self.pad_to_multiple_of_labels ,return_tensors='''pt''' ,)
# replace padding with -100 to ignore loss correctly
__lowercase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) ,-1_0_0 )
__lowercase = labels
return batch
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : nn.Module ,lowercase__ : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
__lowercase = self._prepare_inputs(lowercase__ )
if self.use_amp:
with autocast():
__lowercase = self.compute_loss(lowercase__ ,lowercase__ )
else:
__lowercase = self.compute_loss(lowercase__ ,lowercase__ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__lowercase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__lowercase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
if self.args.gradient_accumulation_steps > 1:
__lowercase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowercase__ ).backward()
elif self.use_apex:
with amp.scale_loss(lowercase__ ,self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowercase__ )
else:
loss.backward()
return loss.detach()
def _A ( ):
"""simple docstring"""
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , A__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__lowercase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
__lowercase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
__lowercase = F"[{''.join(data_args.chars_to_ignore )}]"
def remove_special_characters(A__ ):
__lowercase = re.sub(A__ , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
__lowercase = train_dataset.map(A__ , remove_columns=['''sentence'''] )
__lowercase = eval_dataset.map(A__ , remove_columns=['''sentence'''] )
def extract_all_chars(A__ ):
__lowercase = ''' '''.join(batch['''text'''] )
__lowercase = list(set(A__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
__lowercase = train_dataset.map(
A__ , batched=A__ , batch_size=-1 , keep_in_memory=A__ , remove_columns=train_dataset.column_names , )
__lowercase = train_dataset.map(
A__ , batched=A__ , batch_size=-1 , keep_in_memory=A__ , remove_columns=eval_dataset.column_names , )
__lowercase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
__lowercase = {v: k for k, v in enumerate(A__ )}
__lowercase = vocab_dict[''' ''']
del vocab_dict[" "]
__lowercase = len(A__ )
__lowercase = len(A__ )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(A__ , A__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
__lowercase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=A__ , return_attention_mask=A__ )
__lowercase = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__ )
__lowercase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__lowercase = min(len(A__ ) , data_args.max_train_samples )
__lowercase = train_dataset.select(range(A__ ) )
if data_args.max_val_samples is not None:
__lowercase = eval_dataset.select(range(data_args.max_val_samples ) )
__lowercase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(A__ ):
__lowercase , __lowercase = torchaudio.load(batch['''path'''] )
__lowercase = resampler(A__ ).squeeze().numpy()
__lowercase = 16000
__lowercase = batch['''text''']
return batch
__lowercase = train_dataset.map(
A__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__lowercase = eval_dataset.map(
A__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(A__ ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), F"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
__lowercase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(A__ )
return batch
__lowercase = train_dataset.map(
A__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A__ , num_proc=data_args.preprocessing_num_workers , )
__lowercase = eval_dataset.map(
A__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
__lowercase = datasets.load_metric('''wer''' )
def compute_metrics(A__ ):
__lowercase = pred.predictions
__lowercase = np.argmax(A__ , axis=-1 )
__lowercase = processor.tokenizer.pad_token_id
__lowercase = processor.batch_decode(A__ )
# we do not want to group tokens when computing the metrics
__lowercase = processor.batch_decode(pred.label_ids , group_tokens=A__ )
__lowercase = wer_metric.compute(predictions=A__ , references=A__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__lowercase = DataCollatorCTCWithPadding(processor=A__ , padding=A__ )
# Initialize our Trainer
__lowercase = CTCTrainer(
model=A__ , data_collator=A__ , args=A__ , compute_metrics=A__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowercase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__lowercase = model_args.model_name_or_path
else:
__lowercase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__lowercase = trainer.train(resume_from_checkpoint=A__ )
trainer.save_model()
__lowercase = train_result.metrics
__lowercase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(A__ )
)
__lowercase = min(A__ , len(A__ ) )
trainer.log_metrics('''train''' , A__ )
trainer.save_metrics('''train''' , A__ )
trainer.save_state()
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = data_args.max_val_samples if data_args.max_val_samples is not None else len(A__ )
__lowercase = min(A__ , len(A__ ) )
trainer.log_metrics('''eval''' , A__ )
trainer.save_metrics('''eval''' , A__ )
return results
if __name__ == "__main__":
main()
| 104 |
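A detail in the collator above matters for correctness: after padding the label ids, positions outside the attention mask are overwritten with -100, the index that PyTorch losses ignore, so padding never leaks into the CTC loss. The masking step in isolation, with made-up ids:

import torch

labels = torch.tensor([[5, 9, 2, 0, 0]])          # 0 = tokenizer pad id, for illustration
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])  # 1 marks real tokens
masked = labels.masked_fill(attention_mask.ne(1), -100)
print(masked)  # tensor([[   5,    9,    2, -100, -100]])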
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _snake_case ( unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase_ : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
snake_case_ = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
snake_case_ = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
snake_case_ = text_generator("This is a test" , do_sample=a__ , num_return_sequences=2 , return_tensors=a__ )
self.assertEqual(
a__ , [
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
] , )
snake_case_ = text_generator.model.config.eos_token_id
snake_case_ = "<pad>"
snake_case_ = text_generator(
["This is a test", "This is a second test"] , do_sample=a__ , num_return_sequences=2 , batch_size=2 , return_tensors=a__ , )
self.assertEqual(
a__ , [
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
] , )
@require_tf
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
snake_case_ = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
snake_case_ = text_generator(["This is a test", "This is a second test"] , do_sample=a__ )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> str:
'''simple docstring'''
snake_case_ = TextGenerationPipeline(model=a__ , tokenizer=a__ )
return text_generator, ["This is a test", "Another test"]
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = "Hello I believe in"
snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
snake_case_ = text_generator(a__ )
self.assertEqual(
a__ , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
snake_case_ = text_generator(a__ , stop_sequence=" fe" )
self.assertEqual(a__ , [{"generated_text": "Hello I believe in fe"}] )
def lowerCAmelCase__ ( self , a__ , a__ ) -> Tuple:
'''simple docstring'''
snake_case_ = text_generator.model
snake_case_ = text_generator.tokenizer
snake_case_ = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
snake_case_ = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
snake_case_ = pipeline(task="text-generation" , model=a__ , tokenizer=a__ , return_full_text=a__ )
snake_case_ = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
snake_case_ = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
snake_case_ = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
snake_case_ = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_full_text=a__ , return_text=a__ )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_full_text=a__ , return_tensors=a__ )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_text=a__ , return_tensors=a__ )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
snake_case_ = text_generator("" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
snake_case_ = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
snake_case_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
snake_case_ = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(a__ ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
import torch
# Classic `model_kwargs`
snake_case_ = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
import torch
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
import torch
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=a__ , top_p=0.5 )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = "Hello world"
snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
snake_case_ = logging.get_logger("transformers.generation.tf_utils" )
else:
snake_case_ = logging.get_logger("transformers.generation.utils" )
snake_case_ = "Both `max_new_tokens`" # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_length=10 , max_new_tokens=1 )
self.assertIn(a__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_new_tokens=1 )
self.assertNotIn(a__ , cl.out )
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_length=10 )
self.assertNotIn(a__ , cl.out )
| 85 | 0 |
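# Illustrative sketch (not part of the test suite above): how the `handle_long_generation="hole"`
# strategy exercised by these tests is used from the public `pipeline` API. The tiny checkpoint
# name matches the one used in the tests; everything else is a documented pipeline call.
from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
# "hole" truncates the prompt from the left so that prompt + max_new_tokens fits the model context.
outputs = generator("This is a test " * 500, handle_long_generation="hole", max_new_tokens=20)
print(outputs[0]["generated_text"][-100:])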
"""simple docstring"""
import argparse
import os
import re
a : List[Any] = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a : List[str] = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
a : str = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple , _lowercase : bool = False ) ->Any:
'''simple docstring'''
with open(_lowercase , "r" , encoding="utf-8" ) as f:
a : List[Any] = f.read()
a : Dict = content.split("\n" )
a : List[str] = []
a : Any = 0
while line_idx < len(_lowercase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a : List[str] = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a : int = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a : int = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a : str = sorted(_lowercase , key=lambda _lowercase : _re_identifier.search(_lowercase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowercase , "w" , encoding="utf-8" ) as f:
f.write("\n".join(_lowercase ) )
elif "\n".join(_lowercase ) != content:
return True
def _SCREAMING_SNAKE_CASE ( _lowercase : bool = False ) ->Any:
'''simple docstring'''
a : List[str] = [os.path.join(_lowercase , _lowercase ) for f in os.listdir(_lowercase ) if f.endswith(".py" )]
a : Union[str, Any] = [sort_auto_mapping(_lowercase , overwrite=_lowercase ) for fname in fnames]
if not overwrite and any(_lowercase ):
a : List[Any] = [f for f, d in zip(_lowercase , _lowercase ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(_lowercase )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
a : str = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 105 |
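# Minimal demonstration of the sorting rule implemented above: blocks inside an OrderedDict
# mapping are ordered by the first quoted identifier matched by `_re_identifier`. The sample
# lines below are hypothetical, chosen only to show the key extraction.
import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
blocks = ['        ("bert", "BertConfig"),', '        ("albert", "AlbertConfig"),']
blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
assert [_re_identifier.search(b).groups()[0] for b in blocks] == ["albert", "bert"]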
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case ( unittest.TestCase , lowercase_ ):
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = load_tool("text-classification" )
self.tool.setup()
snake_case_ = load_tool("text-classification" , remote=a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
| 85 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Any ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
lowerCAmelCase__ : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
lowerCAmelCase__ : Optional[int] = DisjunctiveConstraint(lowercase_ )
self.assertTrue(isinstance(dc.token_ids ,lowercase_ ) )
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self : Tuple ):
# We can't have constraints that are complete subsets of one another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2], which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
lowerCAmelCase__ : List[str] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint(lowercase_ ) # fails here
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : str = [[1, 2, 3], [1, 2, 4]]
lowerCAmelCase__ : Optional[int] = DisjunctiveConstraint(lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = dc.update(1 )
lowerCAmelCase__ : int = stepped is True and completed is False and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = dc.update(2 )
lowerCAmelCase__ : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = dc.update(3 )
lowerCAmelCase__ : Any = stepped is True and completed is True and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : Optional[int] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowerCAmelCase__ : List[Any] = DisjunctiveConstraint(lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Dict = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 106 |
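# Sketch of how a DisjunctiveConstraint like the ones tested above plugs into generation.
# Constrained decoding requires beam search (num_beams > 1); the checkpoint and token ids below
# are assumptions chosen only for illustration.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
# Force the output to contain one of the two token-id sequences.
constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
input_ids = tokenizer("The answer is", return_tensors="pt").input_ids
output = model.generate(input_ids, constraints=[constraint], num_beams=4, max_new_tokens=10)
print(tokenizer.decode(output[0]))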
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Dict = "bridgetower_vision_model"
def __init__( self , a__=768 , a__=12 , a__=3 , a__=16 , a__=288 , a__=1 , a__=1e-05 , a__=False , a__=True , a__=False , **a__ , ) -> int:
'''simple docstring'''
super().__init__(**a__ )
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_channels
snake_case_ = patch_size
snake_case_ = image_size
snake_case_ = initializer_factor
snake_case_ = layer_norm_eps
snake_case_ = stop_gradient
snake_case_ = share_layernorm
snake_case_ = remove_last_layer
@classmethod
def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig":
'''simple docstring'''
snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ )
if config_dict.get("model_type" ) == "bridgetower":
snake_case_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a__ , **a__ )
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Any = "bridgetower_text_model"
def __init__( self , a__=50_265 , a__=768 , a__=12 , a__=12 , a__=1 , a__=3_072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=514 , a__=1 , a__=1e-05 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , **a__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**a__ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = initializer_factor
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = use_cache
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = eos_token_id
@classmethod
def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig":
'''simple docstring'''
snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ )
if config_dict.get("model_type" ) == "bridgetower":
snake_case_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a__ , **a__ )
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Union[str, Any] = "bridgetower"
def __init__( self , a__=True , a__="gelu" , a__=768 , a__=1 , a__=1e-05 , a__=False , a__="add" , a__=12 , a__=6 , a__=False , a__=False , a__=None , a__=None , **a__ , ) -> int:
'''simple docstring'''
snake_case_ = kwargs.pop("text_config_dict" , a__ )
snake_case_ = kwargs.pop("vision_config_dict" , a__ )
super().__init__(**a__ )
snake_case_ = share_cross_modal_transformer_layers
snake_case_ = hidden_act
snake_case_ = hidden_size
snake_case_ = initializer_factor
snake_case_ = layer_norm_eps
snake_case_ = share_link_tower_layers
snake_case_ = link_tower_type
snake_case_ = num_attention_heads
snake_case_ = num_hidden_layers
snake_case_ = tie_word_embeddings
snake_case_ = init_layernorm_from_vision_encoder
if text_config is None:
snake_case_ = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
snake_case_ = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
snake_case_ = BridgeTowerTextConfig(**a__ )
snake_case_ = BridgeTowerVisionConfig(**a__ )
@classmethod
def lowerCAmelCase__ ( cls , a__ , a__ , **a__ ) -> List[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.text_config.to_dict()
snake_case_ = self.vision_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 85 | 0 |
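# Hedged usage sketch for the three configuration classes defined above, shown here under their
# upstream transformers names (this file obfuscates them): compose a full config from the two
# sub-configs via the classmethod defined at the bottom of the composite config.
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

text_config = BridgeTowerTextConfig(vocab_size=50_265, hidden_size=768)
vision_config = BridgeTowerVisionConfig(hidden_size=768, image_size=288)
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
print(config.to_dict()["vision_config"]["image_size"])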
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__lowerCAmelCase : List[str] = HfArgumentParser(InitializationArguments)
__lowerCAmelCase : List[str] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__lowerCAmelCase : str = {
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
__lowerCAmelCase : int = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 107 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase_( snake_case : list[int] ):
'''simple docstring'''
return len(set(snake_case ) ) == len(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 0 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return " ".join(
"".join(word[::-1] ) if len(SCREAMING_SNAKE_CASE ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 108 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_SCREAMING_SNAKE_CASE : Any = False
try:
_SCREAMING_SNAKE_CASE : Optional[Any] = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class _snake_case :
def __init__( self , a__ = None , a__ = [] ) -> List[str]:
'''simple docstring'''
snake_case_ = 0
snake_case_ = choices
snake_case_ = prompt
if sys.platform == "win32":
snake_case_ = "*"
else:
snake_case_ = "➔ "
def lowerCAmelCase__ ( self , a__ , a__ = "" ) -> int:
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , a__ )
else:
forceWrite(self.choices[index] , a__ )
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(a__ )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def lowerCAmelCase__ ( self , a__ , a__ = 1 ) -> List[str]:
'''simple docstring'''
snake_case_ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(a__ )
move_cursor(a__ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = int(chr(self.current_selection ) )
snake_case_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , a__ )
else:
return
else:
return
def lowerCAmelCase__ ( self , a__ = 0 ) -> List[str]:
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
snake_case_ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(a__ )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
snake_case_ = int(builtins.input() )
except ValueError:
snake_case_ = default_choice
else:
snake_case_ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(a__ , "\n" )
return choice
| 85 | 0 |
"""simple docstring"""
A: Optional[int] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A: List[str] = [{"type": "code", "content": INSTALL_CONTENT}]
A: Any = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 109 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase_( snake_case : Optional[int] ):
'''simple docstring'''
return EnvironmentCommand()
class _snake_case ( lowercase_ ):
@staticmethod
def lowerCAmelCase__ ( a__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ = parser.add_parser("env" )
download_parser.set_defaults(func=a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = huggingface_hub.__version__
snake_case_ = "not installed"
snake_case_ = "NA"
if is_torch_available():
import torch
snake_case_ = torch.__version__
snake_case_ = torch.cuda.is_available()
snake_case_ = "not installed"
if is_transformers_available():
import transformers
snake_case_ = transformers.__version__
snake_case_ = "not installed"
if is_accelerate_available():
import accelerate
snake_case_ = accelerate.__version__
snake_case_ = "not installed"
if is_xformers_available():
import xformers
snake_case_ = xformers.__version__
snake_case_ = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(a__ ) )
return info
@staticmethod
def lowerCAmelCase__ ( a__ ) -> str:
'''simple docstring'''
return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 85 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( UpperCamelCase__ ):
_lowercase : Dict = ['''image_processor''', '''tokenizer''']
_lowercase : Union[str, Any] = '''BlipImageProcessor'''
_lowercase : Tuple = '''AutoTokenizer'''
def __init__( self: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] ) -> Any:
"""simple docstring"""
lowercase__ = False
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.image_processor
def __call__( self: Tuple , UpperCamelCase_: ImageInput = None , UpperCamelCase_: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase_: bool = True , UpperCamelCase_: Union[bool, str, PaddingStrategy] = False , UpperCamelCase_: Union[bool, str, TruncationStrategy] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: int = 0 , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[str, TensorType]] = None , **UpperCamelCase_: List[Any] , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
lowercase__ = self.tokenizer
lowercase__ = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
return text_encoding
# add pixel_values
lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ )
if text is not None:
lowercase__ = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
else:
lowercase__ = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase_ )
return encoding_image_processor
def lowerCamelCase_ ( self: List[str] , *UpperCamelCase_: Optional[Any] , **UpperCamelCase_: Optional[int] ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] , *UpperCamelCase_: Optional[Any] , **UpperCamelCase_: Union[str, Any] ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 110 |
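# Usage sketch for a processor of the shape defined above (it mirrors the InstructBLIP processor
# in transformers). The checkpoint name and the sample image URL are assumptions; any repo that
# ships this processor class works the same way.
import requests
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# Passing both modalities returns pixel_values plus the tokenized text.
inputs = processor(images=image, text="What is in this picture?", return_tensors="pt")
print(sorted(inputs.keys()))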
'''simple docstring'''
import os
_SCREAMING_SNAKE_CASE : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = 0
snake_case_ = 0
while index < len(snake_case ) - 1:
snake_case_ = SYMBOLS[numerals[index]]
snake_case_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCamelCase_( snake_case : int ):
'''simple docstring'''
snake_case_ = ""
snake_case_ = num // 1_0_0_0
numerals += m_count * "M"
num %= 1_0_0_0
snake_case_ = num // 1_0_0
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
snake_case_ = num // 1_0
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCamelCase_( snake_case : str = "/p089_roman.txt" ):
'''simple docstring'''
snake_case_ = 0
with open(os.path.dirname(snake_case ) + roman_numerals_filename ) as filea:
snake_case_ = filea.readlines()
for line in lines:
snake_case_ = line.strip()
snake_case_ = parse_roman_numerals(snake_case )
snake_case_ = generate_roman_numerals(snake_case )
savings += len(snake_case ) - len(snake_case )
return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 85 | 0 |
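# Quick self-contained check of the round trip implemented above: parsing a Roman numeral with
# subtractive notation. The helper below re-implements the parse step locally so the example does
# not depend on the obfuscated names in this file.
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def parse_roman(numerals: str) -> int:
    total = 0
    for current, nxt in zip(numerals, numerals[1:] + "I"):
        # A symbol smaller than its successor is subtracted (e.g. the "C" in "CM").
        total += -SYMBOLS[current] if SYMBOLS[current] < SYMBOLS[nxt] else SYMBOLS[current]
    return total

assert parse_roman("MCMXC") == 1990
assert parse_roman("XLIX") == 49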
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class A_ (unittest.TestCase ):
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = 3
UpperCAmelCase = 2_5_0
UpperCAmelCase = ids_tensor((batch_size, length) , a__ )
UpperCAmelCase = torch.ones((batch_size, length) , device=a__ , dtype=torch.float ) / length
return input_ids, scores
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(a__ , a__ ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(a__ , a__ ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(1_0 )
self.assertTrue(criteria(a__ , a__ ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = MaxLengthCriteria(max_length=1_0 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(a__ , a__ ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(a__ , a__ ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(1_0 )
self.assertTrue(criteria(a__ , a__ ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(a__ , a__ ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(a__ , a__ ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(1_0 )
self.assertTrue(criteria(a__ , a__ ) )
UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 1_0 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(a__ , a__ ) )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(a__ , a__ ) )
def _lowercase ( self ):
'''simple docstring'''
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
with self.assertWarns(a__ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
self.assertEqual(len(a__ ) , 1 )
| 273 |
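# Sketch of the stopping criteria tested above being used in an actual `generate` call. The
# checkpoint is a small public model chosen only for illustration.
from transformers import AutoModelForCausalLM, AutoTokenizer, MaxLengthCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
input_ids = tokenizer("Stopping criteria cap", return_tensors="pt").input_ids
# Generation halts as soon as the total sequence length reaches 10 tokens.
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
output = model.generate(input_ids, stopping_criteria=criteria, do_sample=False)
assert output.shape[-1] <= 10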
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE : int = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ["CLIPFeatureExtractor"]
_SCREAMING_SNAKE_CASE : Dict = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
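# The lazy module above keeps `import transformers` cheap: submodules are resolved only on first
# attribute access. A typical downstream use of the exported CLIP classes (image-text similarity;
# checkpoint and image URL are the standard public examples):
import requests
from PIL import Image
from transformers import CLIPModel, CLIPProcessor  # triggers the lazy import machinery

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
outputs = model(**inputs)
print(outputs.logits_per_image.softmax(dim=1))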
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : List[str] = logging.get_logger(__name__)
_UpperCamelCase : List[str] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class snake_case__ ( lowercase_):
a_ = "gpt_neox"
def __init__( self : Tuple , _A : List[Any]=5_04_32 , _A : List[Any]=61_44 , _A : Optional[Any]=44 , _A : int=64 , _A : Optional[Any]=2_45_76 , _A : Optional[Any]="gelu" , _A : str=0.25 , _A : List[Any]=1_00_00 , _A : Tuple=0.0 , _A : Optional[Any]=0.0 , _A : Optional[int]=0.1 , _A : Optional[Any]=20_48 , _A : Optional[int]=0.02 , _A : List[str]=1e-5 , _A : Optional[int]=True , _A : int=0 , _A : Optional[int]=2 , _A : Union[str, Any]=False , _A : Optional[Any]=True , _A : Optional[Any]=None , **_A : Tuple , ) -> Union[str, Any]:
super().__init__(bos_token_id=a__ , eos_token_id=a__ , **a__ )
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : List[str] = rotary_pct
UpperCAmelCase_ : Any = rotary_emb_base
UpperCAmelCase_ : str = attention_dropout
UpperCAmelCase_ : Optional[int] = hidden_dropout
UpperCAmelCase_ : List[str] = classifier_dropout
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : Tuple = use_cache
UpperCAmelCase_ : List[str] = tie_word_embeddings
UpperCAmelCase_ : Optional[Any] = use_parallel_residual
UpperCAmelCase_ : str = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def A ( self : int ) -> Any:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , a__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"got {self.rope_scaling}" )
UpperCAmelCase_ : Optional[int] = self.rope_scaling.get('''type''' , a__ )
UpperCAmelCase_ : List[str] = self.rope_scaling.get('''factor''' , a__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(a__ , a__ ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 304 |
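# Example config construction exercising the `rope_scaling` validation above: the dict must carry
# a `type` in {"linear", "dynamic"} and a `factor` > 1, otherwise `_rope_scaling_validation` raises.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)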
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
_SCREAMING_SNAKE_CASE : int = {
"gpt-neox-20b": 2048,
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : str = VOCAB_FILES_NAMES
lowerCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : str = ["input_ids", "attention_mask"]
def __init__( self , a__=None , a__=None , a__=None , a__="<|endoftext|>" , a__="<|endoftext|>" , a__="<|endoftext|>" , a__=False , **a__ , ) -> Tuple:
'''simple docstring'''
super().__init__(
a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , add_prefix_space=a__ , **a__ , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space:
snake_case_ = getattr(a__ , pre_tok_state.pop("type" ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**a__ )
snake_case_ = add_prefix_space
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
def lowerCAmelCase__ ( self , a__ ) -> List[int]:
'''simple docstring'''
snake_case_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] )
if len(a__ ) > self.model_max_length:
snake_case_ = input_ids[-self.model_max_length :]
return input_ids
| 85 | 0 |
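# Usage sketch for the fast tokenizer above; the 20B repo referenced in its pretrained map ships
# the `tokenizer.json` file this class loads.
from transformers import GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
encoded = tokenizer("Hello world", add_special_tokens=False)
print(encoded.input_ids)
print(tokenizer.decode(encoded.input_ids))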
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase__ = logging.get_logger(__name__)
class __snake_case :
def __init__( self , lowercase , lowercase) -> Any:
'''simple docstring'''
a__: Tuple = question_encoder
a__: List[str] = generator
a__: Optional[Any] = self.question_encoder
def lowerCamelCase_ ( self , lowercase) -> Optional[Any]:
'''simple docstring'''
if os.path.isfile(a__):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
os.makedirs(a__ , exist_ok=a__)
a__: Optional[int] = os.path.join(a__ , 'question_encoder_tokenizer')
a__: Optional[int] = os.path.join(a__ , 'generator_tokenizer')
self.question_encoder.save_pretrained(a__)
self.generator.save_pretrained(a__)
@classmethod
def lowerCamelCase_ ( cls , lowercase , **lowercase) -> List[str]:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
a__: Optional[int] = kwargs.pop('config' , a__)
if config is None:
a__: Tuple = RagConfig.from_pretrained(a__)
a__: Optional[int] = AutoTokenizer.from_pretrained(
a__ , config=config.question_encoder , subfolder='question_encoder_tokenizer')
a__: str = AutoTokenizer.from_pretrained(
a__ , config=config.generator , subfolder='generator_tokenizer')
return cls(question_encoder=a__ , generator=a__)
def __call__( self , *lowercase , **lowercase) -> Union[str, Any]:
'''simple docstring'''
return self.current_tokenizer(*a__ , **a__)
def lowerCamelCase_ ( self , *lowercase , **lowercase) -> Any:
'''simple docstring'''
return self.generator.batch_decode(*a__ , **a__)
def lowerCamelCase_ ( self , *lowercase , **lowercase) -> List[Any]:
'''simple docstring'''
return self.generator.decode(*a__ , **a__)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Any = self.question_encoder
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: str = self.generator
def lowerCamelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = "longest" , lowercase = None , lowercase = True , **lowercase , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , a__ , )
if max_length is None:
a__: int = self.current_tokenizer.model_max_length
a__: int = self(
a__ , add_special_tokens=a__ , return_tensors=a__ , max_length=a__ , padding=a__ , truncation=a__ , **a__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
a__: Tuple = self.current_tokenizer.model_max_length
a__: Dict = self(
text_target=a__ , add_special_tokens=a__ , return_tensors=a__ , padding=a__ , max_length=a__ , truncation=a__ , **a__ , )
a__: Optional[int] = labels['input_ids']
return model_inputs
| 290 |
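# Sketch of the dual-tokenizer behaviour implemented above: calls route to the question encoder's
# tokenizer by default, while decode/batch_decode route to the generator's. The checkpoint name
# is the standard public RAG repo and is assumed to be reachable.
from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
print(inputs.input_ids.shape)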
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_( snake_case : Tuple ):
'''simple docstring'''
snake_case_ = FileLock(str(tmpdir / "foo.lock" ) )
snake_case_ = FileLock(str(tmpdir / "foo.lock" ) )
snake_case_ = 0.01
with locka.acquire():
with pytest.raises(snake_case ):
snake_case_ = time.time()
locka.acquire(snake_case )
assert time.time() - _start > timeout
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = "a" * 1_0_0_0 + ".lock"
snake_case_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(snake_case )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
snake_case_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(snake_case ):
locka.acquire(0 )
| 85 | 0 |
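# Minimal sketch of the lock semantics exercised above: a second acquisition of the same lock
# file with a short timeout raises `Timeout` while the first holder keeps it.
from datasets.utils.filelock import FileLock, Timeout

lock_a = FileLock("demo.lock")
lock_b = FileLock("demo.lock")
with lock_a.acquire():
    try:
        lock_b.acquire(timeout=0.01)
    except Timeout:
        print("lock is already held, as expected")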
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowercase : int = {
"169M": 1_2,
"430M": 2_4,
"1B5": 2_4,
"3B": 3_2,
"7B": 3_2,
"14B": 4_0,
}
lowercase : Dict = {
"169M": 7_6_8,
"430M": 1_0_2_4,
"1B5": 2_0_4_8,
"3B": 2_5_6_0,
"7B": 4_0_9_6,
"14B": 5_1_2_0,
}
def A_ ( A__ ) -> Any:
a__ : Union[str, Any] = list(state_dict.keys() )
for name in state_dict_keys:
a__ : List[Any] = state_dict.pop(A__ )
# emb -> embedding
if name.startswith('emb.' ):
a__ : str = name.replace('emb.' , 'embeddings.' )
# ln0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
a__ : int = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
a__ : int = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , A__ )
# ffn -> feed_forward
a__ : int = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , A__ )
# time_mix_k -> time_mix_key
if name.endswith('.time_mix_k' ):
a__ : Dict = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value
if name.endswith('.time_mix_v' ):
a__ : Any = name.replace('.time_mix_v' , '.time_mix_value' )
# time_mix_r -> time_mix_receptance
if name.endswith('.time_mix_r' ):
a__ : Tuple = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
a__ : Dict = 'rwkv.' + name
a__ : Dict = weight
return state_dict
def A_ ( A__ , A__ , A__ , A__=None , A__=None , A__=False , A__=None ) -> Dict:
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
a__ : List[str] = 5_0277
a__ : Tuple = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
a__ : Dict = PreTrainedTokenizerFast(tokenizer_file=A__ )
a__ : List[str] = len(A__ )
tokenizer.save_pretrained(A__ )
# 2. Build the config
a__ : Any = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
a__ : Dict = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' )
a__ : Optional[int] = RwkvConfig(
vocab_size=A__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(A__ )
# 3. Download model file then convert state_dict
a__ : int = hf_hub_download(A__ , A__ )
a__ : Tuple = torch.load(A__ , map_location='cpu' )
a__ : str = convert_state_dict(A__ )
# 4. Split in shards and save
a__ , a__ : str = shard_checkpoint(A__ )
for shard_file, shard in shards.items():
torch.save(A__ , os.path.join(A__ , A__ ) )
if index is not None:
a__ : str = os.path.join(A__ , A__ )
# Save the index as well
with open(A__ , 'w' , encoding='utf-8' ) as f:
a__ : int = json.dumps(A__ , indent=2 , sort_keys=A__ ) + '\n'
f.write(A__ )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you still have a converted model.' )
a__ : Tuple = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
a__ : Dict = torch.load(os.path.join(A__ , A__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(A__ , A__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
a__ : Optional[int] = AutoModelForCausalLM.from_pretrained(A__ )
model.push_to_hub(A__ , max_shard_size='2GB' )
tokenizer.push_to_hub(A__ )
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
lowercase : Dict = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 99 |
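# Toy demonstration of the key-renaming pass above, run on a fake two-entry state dict; no real
# checkpoint is needed to see the regex rules in action.
import re
import torch

state_dict = {
    "emb.weight": torch.zeros(4, 2),
    "blocks.3.att.time_mix_k": torch.zeros(2),
}
renamed = {}
for name, weight in state_dict.items():
    name = name.replace("emb.", "embeddings.") if name.startswith("emb.") else name
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = name.replace(".time_mix_k", ".time_mix_key")
    renamed["rwkv." + name if name != "head.weight" else name] = weight
print(sorted(renamed))  # ['rwkv.blocks.3.attention.time_mix_key', 'rwkv.embeddings.weight']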
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_SCREAMING_SNAKE_CASE : Any = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _snake_case ( datasets.BuilderConfig ):
lowerCAmelCase_ : Optional[datasets.Features] = None
def UpperCamelCase_( snake_case : "pyspark.sql.DataFrame" , snake_case : List[int] , ):
'''simple docstring'''
import pyspark
def generate_fn():
snake_case_ = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
snake_case_ = df_with_partition_id.select("*" ).where(f'part_id = {partition_id}' ).drop("part_id" )
snake_case_ = partition_df.collect()
snake_case_ = 0
for row in rows:
yield f'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class _snake_case ( _BaseExamplesIterable ):
def __init__( self , a__ , a__=None , ) -> Any:
'''simple docstring'''
snake_case_ = df
snake_case_ = partition_order or range(self.df.rdd.getNumPartitions() )
snake_case_ = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ) -> Union[str, Any]:
'''simple docstring'''
yield from self.generate_examples_fn()
def lowerCAmelCase__ ( self , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
def lowerCAmelCase__ ( self , a__ , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = self.split_shard_indices_by_worker(a__ , a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
@property
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.partition_order )
class _snake_case ( datasets.DatasetBuilder ):
lowerCAmelCase_ : Dict = SparkConfig
def __init__( self , a__ , a__ = None , a__ = None , **a__ , ) -> str:
'''simple docstring'''
import pyspark
snake_case_ = pyspark.sql.SparkSession.builder.getOrCreate()
snake_case_ = df
snake_case_ = working_dir
super().__init__(
cache_dir=a__ , config_name=str(self.df.semanticHash() ) , **a__ , )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
def create_cache_and_write_probe(a__ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a__ )
snake_case_ = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a__ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
snake_case_ = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self , a__ ) -> Optional[Any]:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCAmelCase__ ( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
import pyspark
def get_arrow_batch_size(a__ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
snake_case_ = self.df.count()
snake_case_ = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
snake_case_ = (
self.df.limit(a__ )
.repartition(1 )
.mapInArrow(a__ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
snake_case_ = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
snake_case_ = min(a__ , int(approx_total_size / max_shard_size ) )
snake_case_ = self.df.repartition(a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
'''simple docstring'''
import pyspark
snake_case_ = ParquetWriter if file_format == "parquet" else ArrowWriter
snake_case_ = os.path.join(self._working_dir , os.path.basename(a__ ) ) if self._working_dir else fpath
snake_case_ = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
snake_case_ = self.config.features
snake_case_ = self._writer_batch_size
snake_case_ = self._fs.storage_options
def write_arrow(a__ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
snake_case_ = pyspark.TaskContext().taskAttemptId()
snake_case_ = next(a__ , a__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
snake_case_ = 0
snake_case_ = writer_class(
features=a__ , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([first_batch] )
writer.write_table(a__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
snake_case_ = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([batch] )
writer.write_table(a__ )
if writer._num_bytes > 0:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a__ ) ):
snake_case_ = os.path.join(os.path.dirname(a__ ) , os.path.basename(a__ ) )
shutil.move(a__ , a__ )
snake_case_ = (
self.df.mapInArrow(a__ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCAmelCase__ ( self , a__ , a__ = "arrow" , a__ = None , a__ = None , **a__ , ) -> int:
'''simple docstring'''
self._validate_cache_dir()
snake_case_ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a__ )
snake_case_ = not is_remote_filesystem(self._fs )
snake_case_ = os.path.join if is_local else posixpath.join
snake_case_ = "-TTTTT-SSSSS-of-NNNNN"
snake_case_ = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
snake_case_ = path_join(self._output_dir , a__ )
snake_case_ = 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = []
snake_case_ = []
for task_id, content in self._prepare_split_single(a__ , a__ , a__ ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a__ )
snake_case_ = total_num_examples
snake_case_ = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
snake_case_ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
snake_case_ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a__ , a__ , a__ , ):
rename(
a__ , fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace("TTTTT-SSSSS" , F'{global_shard_id:05d}' ).replace("NNNNN" , F'{total_shards:05d}' ) , )
snake_case_ = []
snake_case_ = 0
for i in range(len(a__ ) ):
snake_case_ , snake_case_ = task_id_and_num_shards[i]
for shard_id in range(a__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a__ , len(a__ ) ).map(lambda a__ : _rename_shard(*a__ ) ).collect()
else:
# don't use any pattern
snake_case_ = 0
snake_case_ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace(a__ , "" ) , )
def lowerCAmelCase__ ( self , a__ , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 85 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( lowercase_ ):
"""simple docstring"""
snake_case = (DPMSolverSinglestepScheduler,)
snake_case = (("num_inference_steps", 25),)
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->int:
'''simple docstring'''
A_ : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**a__ )
return config
def _snake_case ( self , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
A_ : Dict = dict(self.forward_default_kwargs )
A_ : int = kwargs.pop('''num_inference_steps''' , a__ )
A_ : List[str] = self.dummy_sample
A_ : List[Any] = 0.1 * sample
A_ : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
A_ : Union[str, Any] = self.get_scheduler_config(**a__ )
A_ : Optional[int] = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
A_ : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
A_ : Optional[int] = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
A_ : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
A_ , A_ : List[str] = sample, sample
for t in range(a__ , time_step + scheduler.config.solver_order + 1 ):
A_ : str = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A_ : int = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
pass
def _snake_case ( self , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE )->Any:
'''simple docstring'''
A_ : List[str] = dict(self.forward_default_kwargs )
A_ : Union[str, Any] = kwargs.pop('''num_inference_steps''' , a__ )
A_ : Any = self.dummy_sample
A_ : Tuple = 0.1 * sample
A_ : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
A_ : Optional[int] = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
A_ : Dict = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
A_ : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
A_ : Union[str, Any] = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A_ : Union[str, Any] = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
if scheduler is None:
A_ : Any = self.scheduler_classes[0]
A_ : Optional[int] = self.get_scheduler_config(**a__ )
A_ : Dict = scheduler_class(**a__ )
A_ : Optional[int] = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config(**a__ )
A_ : Tuple = scheduler_class(**a__ )
A_ : Tuple = 10
A_ : Union[str, Any] = self.dummy_model()
A_ : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
A_ : str = model(a__ , a__ )
A_ : Optional[int] = scheduler.step(a__ , a__ , a__ ).prev_sample
return sample
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : str = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A_ : int = 50
A_ : Any = self.dummy_model()
A_ : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
A_ : List[Any] = model(a__ , a__ )
A_ : Optional[Any] = scheduler.step(a__ , a__ , a__ ).prev_sample
A_ : str = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3
def _snake_case ( self )->List[str]:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : Any = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A_ : int = self.full_loop(scheduler=a__ )
A_ : int = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
A_ : str = DEISMultistepScheduler.from_config(scheduler.config )
A_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
A_ : int = UniPCMultistepScheduler.from_config(scheduler.config )
A_ : List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A_ : str = self.full_loop(scheduler=a__ )
A_ : List[str] = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def _snake_case ( self )->Dict:
'''simple docstring'''
self.check_over_configs(thresholding=a__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=a__ , prediction_type=a__ , sample_max_value=a__ , algorithm_type='''dpmsolver++''' , solver_order=a__ , solver_type=a__ , )
def _snake_case ( self )->int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def _snake_case ( self )->Any:
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
A_ : Any = self.full_loop(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
assert not torch.isnan(a__ ).any(), "Samples have nan numbers"
def _snake_case ( self )->int:
'''simple docstring'''
self.check_over_configs(lower_order_final=a__ )
self.check_over_configs(lower_order_final=a__ )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _snake_case ( self )->List[str]:
'''simple docstring'''
self.check_over_configs(variance_type=a__ )
self.check_over_configs(variance_type='''learned_range''' )
def _snake_case ( self )->Tuple:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=a__ , time_step=0 )
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : int = self.full_loop()
A_ : Dict = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Optional[Any] = self.full_loop(use_karras_sigmas=a__ )
A_ : Optional[int] = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3
def _snake_case ( self )->int:
'''simple docstring'''
A_ : str = self.full_loop(prediction_type='''v_prediction''' )
A_ : Dict = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : Optional[Any] = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=a__ )
A_ : Any = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3
def _snake_case ( self )->int:
'''simple docstring'''
A_ : Any = self.scheduler_classes[0]
A_ : str = self.get_scheduler_config(thresholding=a__ , dynamic_thresholding_ratio=0 )
A_ : str = scheduler_class(**a__ )
A_ : Optional[Any] = 10
A_ : List[str] = self.dummy_model()
A_ : str = self.dummy_sample_deter.half()
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
A_ : Any = model(a__ , a__ )
A_ : Dict = scheduler.step(a__ , a__ , a__ ).prev_sample
assert sample.dtype == torch.floataa
| 186 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_SCREAMING_SNAKE_CASE : List[str] = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ :
def __init__( self , a , a=13 , a=32 , a=2 , a=3 , a=16 , a=[32, 64, 1_28] , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=10 , a=8 , a=["stage1", "stage2"] , a=[1, 2] , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = embed_dim
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = depths
UpperCamelCase__ = num_heads
UpperCamelCase__ = window_size
UpperCamelCase__ = mlp_ratio
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = hidden_act
UpperCamelCase__ = use_absolute_embeddings
UpperCamelCase__ = patch_norm
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = initializer_range
UpperCamelCase__ = is_training
UpperCamelCase__ = scope
UpperCamelCase__ = use_labels
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = encoder_stride
UpperCamelCase__ = out_features
UpperCamelCase__ = out_indices
def __a ( self ):
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def __a ( self ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __a ( self , a , a , a ):
UpperCamelCase__ = FocalNetModel(config=a__ )
model.to(a__ )
model.eval()
UpperCamelCase__ = model(a__ )
UpperCamelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __a ( self , a , a , a ):
UpperCamelCase__ = FocalNetBackbone(config=a__ )
model.to(a__ )
model.eval()
UpperCamelCase__ = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCamelCase__ = None
UpperCamelCase__ = FocalNetBackbone(config=a__ )
model.to(a__ )
model.eval()
UpperCamelCase__ = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __a ( self , a , a , a ):
UpperCamelCase__ = FocalNetForMaskedImageModeling(config=a__ )
model.to(a__ )
model.eval()
UpperCamelCase__ = model(a__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FocalNetForMaskedImageModeling(a__ )
model.to(a__ )
model.eval()
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(a__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __a ( self , a , a , a ):
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FocalNetForImageClassification(a__ )
model.to(a__ )
model.eval()
UpperCamelCase__ = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FocalNetForImageClassification(a__ )
model.to(a__ )
model.eval()
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ ( lowercase_ , lowercase_ , unittest.TestCase ):
__UpperCAmelCase = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def __a ( self ):
UpperCamelCase__ = FocalNetModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=a__ , embed_dim=37 , has_text_modality=a__ )
def __a ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self ):
return
def __a ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __a ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def __a ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a__ )
def __a ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def __a ( self ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def __a ( self ):
pass
def __a ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def __a ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ = model_class(a__ )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def __a ( self , a , a , a , a ):
UpperCamelCase__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
UpperCamelCase__ = model(**self._prepare_for_class(a__ , a__ ) )
UpperCamelCase__ = outputs.hidden_states
UpperCamelCase__ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a__ ) , a__ )
# FocalNet has a different seq_length
UpperCamelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCamelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(a__ ) , a__ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = reshaped_hidden_states[0].shape
UpperCamelCase__ = (
reshaped_hidden_states[0].view(a__ , a__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __a ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
def __a ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = 3
UpperCamelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
@slow
def __a ( self ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = FocalNetModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def __a ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = _config_zero_init(a__ )
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(config=a__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class lowercase_ ( unittest.TestCase ):
@cached_property
def __a ( self ):
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def __a ( self ):
UpperCamelCase__ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(a__ )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCamelCase__ = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
UpperCamelCase__ = model(**a__ )
# verify the logits
UpperCamelCase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , a__ )
UpperCamelCase__ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class lowercase_ ( lowercase_ , unittest.TestCase ):
__UpperCAmelCase = (FocalNetBackbone,) if is_torch_available() else ()
__UpperCAmelCase = FocalNetConfig
__UpperCAmelCase = False
def __a ( self ):
UpperCamelCase__ = FocalNetModelTester(self )
| 80 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return 12
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(a__ )
@property
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = 12
snake_case_ = 12
snake_case_ = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
snake_case_ = TransformeraDModel(**a__ )
return model
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = "cpu"
snake_case_ = self.dummy_vqvae
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_transformer
snake_case_ = VQDiffusionScheduler(self.num_embed )
snake_case_ = LearnedClassifierFreeSamplingEmbeddings(learnable=a__ )
snake_case_ = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = "teddy bear playing in the pool"
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" )
snake_case_ = output.images
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe(
[prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = "cpu"
snake_case_ = self.dummy_vqvae
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_transformer
snake_case_ = VQDiffusionScheduler(self.num_embed )
snake_case_ = LearnedClassifierFreeSamplingEmbeddings(
learnable=a__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = "teddy bear playing in the pool"
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" )
snake_case_ = output.images
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe(
[prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
snake_case_ = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
snake_case_ = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=a__ , output_type="np" , )
snake_case_ = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 85 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def lowerCamelCase_ ( _a : Union[str, Any] , _a : str=1000 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
UpperCAmelCase_ : str = n - 1
UpperCAmelCase_ : int = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
UpperCAmelCase_ : str = 0
while count < prec:
UpperCAmelCase_ : Optional[int] = random.randint(2 , n - 1 )
UpperCAmelCase_ : List[str] = bin_exp_mod(_a , _a , _a )
if b != 1:
UpperCAmelCase_ : str = True
for _ in range(_a ):
if b == n - 1:
UpperCAmelCase_ : str = False
break
UpperCAmelCase_ : Any = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
UpperCamelCase_ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 345 |
'''simple docstring'''
from statistics import mean, stdev
def UpperCamelCase_( snake_case : list , snake_case : int = 3 ):
'''simple docstring'''
snake_case_ = min(snake_case )
snake_case_ = max(snake_case )
# normalize data
return [round((x - x_min) / (x_max - x_min) , snake_case ) for x in data]
def UpperCamelCase_( snake_case : list , snake_case : int = 3 ):
'''simple docstring'''
snake_case_ = mean(snake_case )
snake_case_ = stdev(snake_case )
# standardize data
return [round((x - mu) / (sigma) , snake_case ) for x in data]
| 85 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase ( __lowerCamelCase : Optional[int] ) ->Dict:
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x20000 and cp <= 0x2a6df) #
or (cp >= 0x2a700 and cp <= 0x2b73f) #
or (cp >= 0x2b740 and cp <= 0x2b81f) #
or (cp >= 0x2b820 and cp <= 0x2ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2f800 and cp <= 0x2fa1f) #
): #
return True
return False
def lowerCamelCase ( __lowerCamelCase : str ) ->Tuple:
for char in word:
_SCREAMING_SNAKE_CASE = ord(__lowerCamelCase )
if not _is_chinese_char(__lowerCamelCase ):
return 0
return 1
def lowerCamelCase ( __lowerCamelCase : List[str] ) ->Union[str, Any]:
_SCREAMING_SNAKE_CASE = set()
for token in tokens:
_SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) > 1 and is_chinese(__lowerCamelCase )
if chinese_word:
word_set.add(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = list(__lowerCamelCase )
return word_list
def lowerCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : set() ) ->str:
if not chinese_word_set:
return bert_tokens
_SCREAMING_SNAKE_CASE = max([len(__lowerCamelCase ) for w in chinese_word_set] )
_SCREAMING_SNAKE_CASE = bert_tokens
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0, len(__lowerCamelCase )
while start < end:
_SCREAMING_SNAKE_CASE = True
if is_chinese(bert_word[start] ):
_SCREAMING_SNAKE_CASE = min(end - start , __lowerCamelCase )
for i in range(__lowerCamelCase , 1 , -1 ):
_SCREAMING_SNAKE_CASE = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_SCREAMING_SNAKE_CASE = """##""" + bert_word[j]
_SCREAMING_SNAKE_CASE = start + i
_SCREAMING_SNAKE_CASE = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : LTP , __lowerCamelCase : BertTokenizer ) ->Dict:
_SCREAMING_SNAKE_CASE = []
for i in range(0 , len(__lowerCamelCase ) , 100 ):
_SCREAMING_SNAKE_CASE = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_SCREAMING_SNAKE_CASE = [get_chinese_word(__lowerCamelCase ) for r in res]
ltp_res.extend(__lowerCamelCase )
assert len(__lowerCamelCase ) == len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = []
for i in range(0 , len(__lowerCamelCase ) , 100 ):
_SCREAMING_SNAKE_CASE = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCamelCase , truncation=__lowerCamelCase , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCamelCase ) == len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = []
for input_ids, chinese_word in zip(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE = []
for id in input_ids:
_SCREAMING_SNAKE_CASE = bert_tokenizer._convert_id_to_token(__lowerCamelCase )
input_tokens.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = add_sub_symbol(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__lowerCamelCase ):
if token[:2] == "##":
_SCREAMING_SNAKE_CASE = token[2:]
# save chinese tokens' pos
if len(__lowerCamelCase ) == 1 and _is_chinese_char(ord(__lowerCamelCase ) ):
ref_id.append(__lowerCamelCase )
ref_ids.append(__lowerCamelCase )
assert len(__lowerCamelCase ) == len(__lowerCamelCase )
return ref_ids
def lowerCamelCase ( __lowerCamelCase : int ) ->List[str]:
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
_SCREAMING_SNAKE_CASE = f.readlines()
_SCREAMING_SNAKE_CASE = [line.strip() for line in data if len(__lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_SCREAMING_SNAKE_CASE = LTP(args.ltp ) # faster in GPU device
_SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained(args.bert )
_SCREAMING_SNAKE_CASE = prepare_ref(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
_SCREAMING_SNAKE_CASE = [json.dumps(__lowerCamelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCamelCase )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
lowercase_ = parser.parse_args()
main(args)
| 58 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 85 | 0 |
def lowerCAmelCase_ (lowerCAmelCase__: list , lowerCAmelCase__: list , lowerCAmelCase__: int , lowerCAmelCase__: int , lowerCAmelCase__: int ):
"""simple docstring"""
if index == number_of_items:
return 0
UpperCAmelCase_: int = 0
UpperCAmelCase_: str = 0
UpperCAmelCase_: Optional[int] = knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , index + 1 )
if weights[index] <= max_weight:
UpperCAmelCase_: List[Any] = values[index] + knapsack(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , max_weight - weights[index] , index + 1 )
return max(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 147 |
'''simple docstring'''
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[int] ):
'''simple docstring'''
snake_case_ = [0 for i in range(r + 1 )]
# nc0 = 1
snake_case_ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
snake_case_ = min(snake_case , snake_case )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 85 | 0 |
import os
from pathlib import Path
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : int ,__lowercase : int ):
'''simple docstring'''
A_ : List[str] = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
A_ : Dict = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
A_ : str = f'''{src_lang}-{tgt_lang}'''
A_ : Dict = f'''\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'''
os.makedirs(__lowercase ,exist_ok=__lowercase )
A_ : Optional[Any] = os.path.join(__lowercase ,'README.md' )
print(f'''Generating {path}''' )
with open(__lowercase ,'w' ,encoding='utf-8' ) as f:
f.write(__lowercase )
# make sure we are under the root of the project
_UpperCAmelCase = Path(__file__).resolve().parent.parent.parent
_UpperCAmelCase = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_UpperCAmelCase = model_name.split("""-""")
_UpperCAmelCase = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 140 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Tuple = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
_SCREAMING_SNAKE_CASE : List[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict , lowercase_ : List[Any] , lowercase_ : List[Any]=100 , lowercase_ : List[Any]=13 , lowercase_ : Any=30 , lowercase_ : int=2 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[int]=True , lowercase_ : Dict=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Dict=4 , lowercase_ : Tuple=4 , lowercase_ : str=37 , lowercase_ : Tuple="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=10 , lowercase_ : List[Any]=0.02 , lowercase_ : Optional[Any]=3 , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple=[0, 1, 2, 3] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = 100
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : Any = image_size
SCREAMING_SNAKE_CASE_ : List[Any] = patch_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE_ : Any = is_training
SCREAMING_SNAKE_CASE_ : str = use_labels
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : int = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE_ : str = hidden_act
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : int = scope
SCREAMING_SNAKE_CASE_ : str = out_indices
SCREAMING_SNAKE_CASE_ : Dict = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ : List[str] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ : str = num_patches + 1
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = BeitModel(config=a__)
model.to(a__)
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(a__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple , lowercase_ : int , lowercase_ : List[str] , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = BeitForMaskedImageModeling(config=a__)
model.to(a__)
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BeitForImageClassification(a__)
model.to(a__)
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(a__ , labels=a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
SCREAMING_SNAKE_CASE_ : Tuple = 1
SCREAMING_SNAKE_CASE_ : int = BeitForImageClassification(a__)
model.to(a__)
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ : List[Any] = model(a__ , labels=a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : int , lowercase_ : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = BeitForSemanticSegmentation(a__)
model.to(a__)
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(a__)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
SCREAMING_SNAKE_CASE_ : List[str] = model(a__ , labels=a__)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = BeitModelTester(self)
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''')
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(a__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
SCREAMING_SNAKE_CASE_ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = model_class(a__)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a__)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a__), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(a__)
model.to(a__)
model.train()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._prepare_for_class(a__ , a__ , return_labels=a__)
SCREAMING_SNAKE_CASE_ : List[str] = model(**a__).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a__), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(a__)
model.gradient_checkpointing_enable()
model.to(a__)
model.train()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(a__ , a__ , return_labels=a__)
SCREAMING_SNAKE_CASE_ : int = model(**a__).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Dict = _config_zero_init(a__)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Dict = model_class(config=a__)
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : str = BeitModel.from_pretrained(a__)
self.assertIsNotNone(a__)
def _A () -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''') if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''').to(a__)
SCREAMING_SNAKE_CASE_ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor(images=a__ , return_tensors='''pt''').pixel_values.to(a__)
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE_ : Any = torch.ones((1, 196) , dtype=torch.bool).to(a__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(pixel_values=a__ , bool_masked_pos=a__)
SCREAMING_SNAKE_CASE_ : Dict = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ : Dict = torch.Size((1, 196, 8192))
self.assertEqual(logits.shape , a__)
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(
[[-3.24_37, 0.50_72, -13.9174], [-3.24_56, 0.49_48, -13.9401], [-3.20_33, 0.51_21, -13.8550]]).to(a__)
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , a__ , atol=1e-2))
@slow
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''').to(a__)
SCREAMING_SNAKE_CASE_ : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE_ : str = prepare_img()
SCREAMING_SNAKE_CASE_ : List[str] = image_processor(images=a__ , return_tensors='''pt''').to(a__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(**a__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ : Any = torch.Size((1, 1000))
self.assertEqual(logits.shape , a__)
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([-1.23_85, -1.09_87, -1.01_08]).to(a__)
self.assertTrue(torch.allclose(logits[0, :3] , a__ , atol=1e-4))
SCREAMING_SNAKE_CASE_ : Optional[Any] = 281
self.assertEqual(logits.argmax(-1).item() , a__)
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''').to(
a__)
SCREAMING_SNAKE_CASE_ : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Dict = prepare_img()
SCREAMING_SNAKE_CASE_ : int = image_processor(images=a__ , return_tensors='''pt''').to(a__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(**a__)
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ : int = torch.Size((1, 21841))
self.assertEqual(logits.shape , a__)
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([1.68_81, -0.27_87, 0.59_01]).to(a__)
self.assertTrue(torch.allclose(logits[0, :3] , a__ , atol=1e-4))
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2396
self.assertEqual(logits.argmax(-1).item() , a__)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = model.to(a__)
SCREAMING_SNAKE_CASE_ : Optional[int] = BeitImageProcessor(do_resize=a__ , size=640 , do_center_crop=a__)
SCREAMING_SNAKE_CASE_ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Image.open(ds[0]['''file'''])
SCREAMING_SNAKE_CASE_ : int = image_processor(images=a__ , return_tensors='''pt''').to(a__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(**a__)
SCREAMING_SNAKE_CASE_ : Tuple = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 150, 160, 160))
self.assertEqual(logits.shape , a__)
SCREAMING_SNAKE_CASE_ : int = version.parse(PIL.__version__) < version.parse('''9.0.0''')
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
] , device=a__ , )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
] , device=a__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a__ , atol=1e-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''')
SCREAMING_SNAKE_CASE_ : Optional[int] = model.to(a__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = BeitImageProcessor(do_resize=a__ , size=640 , do_center_crop=a__)
SCREAMING_SNAKE_CASE_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = Image.open(ds[0]['''file'''])
SCREAMING_SNAKE_CASE_ : Dict = image_processor(images=a__ , return_tensors='''pt''').to(a__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**a__)
SCREAMING_SNAKE_CASE_ : List[str] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=a__ , target_sizes=[(500, 300)])
SCREAMING_SNAKE_CASE_ : Dict = torch.Size((500, 300))
self.assertEqual(segmentation[0].shape , a__)
SCREAMING_SNAKE_CASE_ : Any = image_processor.post_process_semantic_segmentation(outputs=a__)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((160, 160))
self.assertEqual(segmentation[0].shape , a__)
| 91 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
_SCREAMING_SNAKE_CASE : int = parser.parse_args()
_SCREAMING_SNAKE_CASE : List[Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
_SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPImageProcessor()
_SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
_SCREAMING_SNAKE_CASE : int = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 0 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
__A : Optional[Any] = logging.getLogger()
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = '''\n'''.join(UpperCamelCase__ )
Path(UpperCamelCase__ ).open('''w''' ).writelines(UpperCamelCase__ )
__A : List[str] = "patrickvonplaten/t5-tiny-random"
__A : str = "sshleifer/bart-tiny-random"
__A : Optional[int] = "sshleifer/tiny-mbart"
__A : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class A_ (lowercase_ ):
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
UpperCAmelCase = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
UpperCAmelCase = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
_dump_articles(a__ , a__ )
UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' )
UpperCAmelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
UpperCAmelCase = F"""\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n """.split()
with patch.object(a__ , '''argv''' , a__ ):
run_generate()
assert Path(a__ ).exists()
# os.remove(Path(output_file_name))
def _lowercase ( self ):
'''simple docstring'''
self.run_eval_tester(a__ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _lowercase ( self , _A ):
'''simple docstring'''
self.run_eval_tester(a__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
UpperCAmelCase = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
UpperCAmelCase = {
'''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
'''de''': [
'''Maschinelles Lernen ist großartig, oder?''',
'''Ich esse gerne Bananen''',
'''Morgen ist wieder ein toller Tag!''',
],
}
UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
UpperCAmelCase = str(tmp_dir / '''scores.json''' )
UpperCAmelCase = str(tmp_dir / '''val.target''' )
_dump_articles(a__ , text['''en'''] )
_dump_articles(a__ , text['''de'''] )
UpperCAmelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
UpperCAmelCase = F"""\n run_eval_search.py\n {model}\n {str(a__ )}\n {str(a__ )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n """.split()
testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] )
with patch.object(a__ , '''argv''' , a__ ):
with CaptureStdout() as cs:
run_search()
UpperCAmelCase = [''' num_beams | length_penalty''', model, '''Best score args''']
UpperCAmelCase = ['''Info''']
if "translation" in task:
expected_strings.append('''bleu''' )
else:
expected_strings.extend(a__ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(a__ ).exists()
os.remove(Path(a__ ) )
| 273 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Any = "upernet"
def __init__( self , a__=None , a__=512 , a__=0.0_2 , a__=[1, 2, 3, 6] , a__=True , a__=0.4 , a__=384 , a__=256 , a__=1 , a__=False , a__=255 , **a__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**a__ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
snake_case_ = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a__ , a__ ):
snake_case_ = backbone_config.get("model_type" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(a__ )
snake_case_ = backbone_config
snake_case_ = hidden_size
snake_case_ = initializer_range
snake_case_ = pool_scales
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_in_channels
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = loss_ignore_index
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 85 | 0 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( A : int , A : int ) -> Tuple:
UpperCAmelCase_ : Optional[int] = []
create_all_state(1 , A , A , [] , A )
return result
def __UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ) -> Dict:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(A , total_number - level + 2 ):
current_list.append(A )
create_all_state(i + 1 , A , level - 1 , A , A )
current_list.pop()
def __UpperCAmelCase ( A : list[list[int]] ) -> str:
for i in total_list:
print(*A )
if __name__ == "__main__":
_UpperCamelCase : Optional[Any] = 4
_UpperCamelCase : List[Any] = 2
_UpperCamelCase : Dict = generate_all_combinations(n, k)
print_all_state(total_list)
| 304 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
for char in word:
snake_case_ = ord(snake_case )
if not _is_chinese_char(snake_case ):
return 0
return 1
def UpperCamelCase_( snake_case : List[str] ):
'''simple docstring'''
snake_case_ = set()
for token in tokens:
snake_case_ = len(snake_case ) > 1 and is_chinese(snake_case )
if chinese_word:
word_set.add(snake_case )
snake_case_ = list(snake_case )
return word_list
def UpperCamelCase_( snake_case : List[str] , snake_case : set() ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
snake_case_ = max([len(snake_case ) for w in chinese_word_set] )
snake_case_ = bert_tokens
snake_case_ , snake_case_ = 0, len(snake_case )
while start < end:
snake_case_ = True
if is_chinese(bert_word[start] ):
snake_case_ = min(end - start , snake_case )
for i in range(snake_case , 1 , -1 ):
snake_case_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
snake_case_ = "##" + bert_word[j]
snake_case_ = start + i
snake_case_ = False
break
if single_word:
start += 1
return bert_word
def UpperCamelCase_( snake_case : List[str] , snake_case : LTP , snake_case : BertTokenizer ):
'''simple docstring'''
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["cws"] ).cws
snake_case_ = [get_chinese_word(snake_case ) for r in res]
ltp_res.extend(snake_case )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=snake_case , truncation=snake_case , max_length=5_1_2 )
bert_res.extend(res["input_ids"] )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for input_ids, chinese_word in zip(snake_case , snake_case ):
snake_case_ = []
for id in input_ids:
snake_case_ = bert_tokenizer._convert_id_to_token(snake_case )
input_tokens.append(snake_case )
snake_case_ = add_sub_symbol(snake_case , snake_case )
snake_case_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(snake_case ):
if token[:2] == "##":
snake_case_ = token[2:]
# save chinese tokens' pos
if len(snake_case ) == 1 and _is_chinese_char(ord(snake_case ) ):
ref_id.append(snake_case )
ref_ids.append(snake_case )
assert len(snake_case ) == len(snake_case )
return ref_ids
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
with open(args.file_name , "r" , encoding="utf-8" ) as f:
snake_case_ = f.readlines()
snake_case_ = [line.strip() for line in data if len(snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
snake_case_ = LTP(args.ltp ) # faster in GPU device
snake_case_ = BertTokenizer.from_pretrained(args.bert )
snake_case_ = prepare_ref(snake_case , snake_case , snake_case )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
snake_case_ = [json.dumps(snake_case ) + "\n" for ref in ref_ids]
f.writelines(snake_case )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
main(args)
| 85 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase__ = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 290 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def UpperCamelCase_( snake_case : Callable ):
'''simple docstring'''
@wraps(snake_case )
def _inner_fn(*snake_case : Optional[int] , **snake_case : List[Any] ):
warnings.warn(
(f'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , snake_case , )
return fn(*snake_case , **snake_case )
return _inner_fn
| 85 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def A_ ( A__ , A__ , A__ ) -> Tuple:
if isinstance(A__ , torch.Tensor ):
return image
elif isinstance(A__ , PIL.Image.Image ):
a__ : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
a__ : Optional[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
a__ : str = np.concatenate(A__ , axis=0 )
a__ : List[Any] = np.array(A__ ).astype(np.floataa ) / 2_55.0
a__ : Any = image.transpose(0 , 3 , 1 , 2 )
a__ : List[str] = 2.0 * image - 1.0
a__ : str = torch.from_numpy(A__ )
elif isinstance(image[0] , torch.Tensor ):
a__ : Union[str, Any] = torch.cat(A__ , dim=0 )
return image
def A_ ( A__ , A__ , A__ , A__=0.99_95 ) -> Union[str, Any]:
if not isinstance(A__ , np.ndarray ):
a__ : str = True
a__ : int = va.device
a__ : Optional[Any] = va.cpu().numpy()
a__ : int = va.cpu().numpy()
a__ : Tuple = np.sum(va * va / (np.linalg.norm(A__ ) * np.linalg.norm(A__ )) )
if np.abs(A__ ) > DOT_THRESHOLD:
a__ : int = (1 - t) * va + t * va
else:
a__ : Tuple = np.arccos(A__ )
a__ : Any = np.sin(A__ )
a__ : Union[str, Any] = theta_a * t
a__ : str = np.sin(A__ )
a__ : Tuple = np.sin(theta_a - theta_t ) / sin_theta_a
a__ : Optional[Any] = sin_theta_t / sin_theta_a
a__ : Tuple = sa * va + sa * va
if inputs_are_torch:
a__ : Optional[Any] = torch.from_numpy(A__ ).to(A__ )
return va
def A_ ( A__ , A__ ) -> int:
a__ : Tuple = F.normalize(A__ , dim=-1 )
a__ : Optional[int] = F.normalize(A__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def A_ ( A__ , A__ ) -> Any:
for param in model.parameters():
a__ : Dict = value
class A__ ( lowercase_ ):
"""simple docstring"""
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=a__ , text_encoder=a__ , clip_model=a__ , tokenizer=a__ , unet=a__ , scheduler=a__ , feature_extractor=a__ , coca_model=a__ , coca_tokenizer=a__ , coca_transform=a__ , )
a__ : Any = (
feature_extractor.size
if isinstance(feature_extractor.size , a__)
else feature_extractor.size['shortest_edge']
)
a__ : Tuple = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std)
set_requires_grad(self.text_encoder , a__)
set_requires_grad(self.clip_model , a__)
def __lowercase ( self , lowercase = "auto") -> int:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a__ : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a__)
def __lowercase ( self) -> str:
'''simple docstring'''
self.enable_attention_slicing(a__)
def __lowercase ( self) -> Any:
'''simple docstring'''
set_requires_grad(self.vae , a__)
def __lowercase ( self) -> Dict:
'''simple docstring'''
set_requires_grad(self.vae , a__)
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
set_requires_grad(self.unet , a__)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
set_requires_grad(self.unet , a__)
def __lowercase ( self , lowercase , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] = min(int(num_inference_steps * strength) , a__)
a__ : str = max(num_inference_steps - init_timestep , 0)
a__ : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> int:
'''simple docstring'''
if not isinstance(a__ , torch.Tensor):
raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(a__)}')
a__ : List[str] = image.to(device=a__ , dtype=a__)
if isinstance(a__ , a__):
a__ : Dict = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(a__)
]
a__ : Optional[Any] = torch.cat(a__ , dim=0)
else:
a__ : List[str] = self.vae.encode(a__).latent_dist.sample(a__)
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
a__ : Any = 0.1_82_15 * init_latents
a__ : Any = init_latents.repeat_interleave(a__ , dim=0)
a__ : str = randn_tensor(init_latents.shape , generator=a__ , device=a__ , dtype=a__)
# get latents
a__ : int = self.scheduler.add_noise(a__ , a__ , a__)
a__ : List[Any] = init_latents
return latents
def __lowercase ( self , lowercase) -> Any:
'''simple docstring'''
a__ : List[Any] = self.coca_transform(a__).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
a__ : Optional[int] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype))
a__ : Optional[int] = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split('<end_of_text>')[0].replace('<start_of_text>' , '').rstrip(' .,')
def __lowercase ( self , lowercase , lowercase) -> int:
'''simple docstring'''
a__ : List[str] = self.feature_extractor.preprocess(a__)
a__ : List[str] = torch.from_numpy(clip_image_input['pixel_values'][0]).unsqueeze(0).to(self.device).half()
a__ : Dict = self.clip_model.get_image_features(a__)
a__ : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a__)
a__ : Optional[Any] = image_embeddings_clip.repeat_interleave(a__ , dim=0)
return image_embeddings_clip
@torch.enable_grad()
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[int]:
'''simple docstring'''
a__ : Tuple = latents.detach().requires_grad_()
a__ : Optional[int] = self.scheduler.scale_model_input(a__ , a__)
# predict the noise residual
a__ : Any = self.unet(a__ , a__ , encoder_hidden_states=a__).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
a__ : Any = self.scheduler.alphas_cumprod[timestep]
a__ : Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a__ : int = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
a__ : Any = torch.sqrt(a__)
a__ : str = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , a__):
a__ : Tuple = self.scheduler.sigmas[index]
a__ : Union[str, Any] = latents - sigma * noise_pred
else:
raise ValueError(F'scheduler type {type(self.scheduler)} not supported')
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
a__ : Optional[Any] = 1 / 0.1_82_15 * sample
a__ : Union[str, Any] = self.vae.decode(a__).sample
a__ : Any = (image / 2 + 0.5).clamp(0 , 1)
a__ : List[Any] = transforms.Resize(self.feature_extractor_size)(a__)
a__ : Dict = self.normalize(a__).to(latents.dtype)
a__ : List[Any] = self.clip_model.get_image_features(a__)
a__ : List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a__)
a__ : str = spherical_dist_loss(a__ , a__).mean() * clip_guidance_scale
a__ : Any = -torch.autograd.grad(a__ , a__)[0]
if isinstance(self.scheduler , a__):
a__ : Any = latents.detach() + grads * (sigma**2)
a__ : Dict = noise_pred_original
else:
a__ : Any = noise_pred_original - torch.sqrt(a__) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase = None , lowercase = None , lowercase = 512 , lowercase = 512 , lowercase = 0.6 , lowercase = 50 , lowercase = 7.5 , lowercase = 1 , lowercase = 0.0 , lowercase = 100 , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = 0.8 , lowercase = 0.1 , lowercase = 0.1 , ) -> Tuple:
'''simple docstring'''
if isinstance(a__ , a__) and len(a__) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(a__)} generators.')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.')
if isinstance(a__ , torch.Generator) and batch_size > 1:
a__ : Dict = [generator] + [None] * (batch_size - 1)
a__ : int = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
a__ : int = [x[0] for x in coca_is_none if x[1]]
a__ : Dict = ', '.join(a__)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(a__):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.')
a__ : str = self.get_image_description(a__)
if style_prompt is None:
if len(a__):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.')
a__ : List[str] = self.get_image_description(a__)
# get prompt text embeddings for content and style
a__ : Any = self.tokenizer(
a__ , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a__ , return_tensors='pt' , )
a__ : Dict = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
a__ : Any = self.tokenizer(
a__ , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a__ , return_tensors='pt' , )
a__ : Optional[int] = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
a__ : Optional[Any] = slerp(a__ , a__ , a__)
# duplicate text embeddings for each generation per prompt
a__ : Any = text_embeddings.repeat_interleave(a__ , dim=0)
# set timesteps
a__ : str = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
a__ : Tuple = {}
if accepts_offset:
a__ : Dict = 1
self.scheduler.set_timesteps(a__ , **a__)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device)
a__ , a__ : Optional[Any] = self.get_timesteps(a__ , a__ , self.device)
a__ : int = timesteps[:1].repeat(a__)
# Preprocess image
a__ : int = preprocess(a__ , a__ , a__)
a__ : Tuple = self.prepare_latents(
a__ , a__ , a__ , text_embeddings.dtype , self.device , a__)
a__ : Any = preprocess(a__ , a__ , a__)
a__ : Tuple = self.prepare_latents(
a__ , a__ , a__ , text_embeddings.dtype , self.device , a__)
a__ : Optional[Any] = slerp(a__ , a__ , a__)
if clip_guidance_scale > 0:
a__ : str = self.get_clip_image_embeddings(a__ , a__)
a__ : Any = self.get_clip_image_embeddings(a__ , a__)
a__ : str = slerp(
a__ , a__ , a__)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a__ : Union[str, Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a__ : int = content_text_input.input_ids.shape[-1]
a__ : List[str] = self.tokenizer([''] , padding='max_length' , max_length=a__ , return_tensors='pt')
a__ : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
a__ : Optional[int] = uncond_embeddings.repeat_interleave(a__ , dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a__ : Any = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
a__ : int = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
a__ : Any = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
a__ : Optional[Any] = torch.randn(a__ , generator=a__ , device='cpu' , dtype=a__).to(
self.device)
else:
a__ : str = torch.randn(a__ , generator=a__ , device=self.device , dtype=a__)
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
a__ : Optional[Any] = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
a__ : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a__ : str = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
a__ : Tuple = {}
if accepts_eta:
a__ : Dict = eta
# check if the scheduler accepts generator
a__ : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
a__ : List[str] = generator
with self.progress_bar(total=a__):
for i, t in enumerate(a__):
# expand the latents if we are doing classifier free guidance
a__ : Optional[int] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
a__ : str = self.scheduler.scale_model_input(a__ , a__)
# predict the noise residual
a__ : List[str] = self.unet(a__ , a__ , encoder_hidden_states=a__).sample
# perform classifier free guidance
if do_classifier_free_guidance:
a__ , a__ : List[Any] = noise_pred.chunk(2)
a__ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
a__ : Dict = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
a__ , a__ : Union[str, Any] = self.cond_fn(
a__ , a__ , a__ , a__ , a__ , a__ , a__ , )
# compute the previous noisy sample x_t -> x_t-1
a__ : List[str] = self.scheduler.step(a__ , a__ , a__ , **a__).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
a__ : List[Any] = 1 / 0.1_82_15 * latents
a__ : List[Any] = self.vae.decode(a__).sample
a__ : List[Any] = (image / 2 + 0.5).clamp(0 , 1)
a__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
a__ : Dict = self.numpy_to_pil(a__)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=a__ , nsfw_content_detected=a__)
| 99 |
'''simple docstring'''
from __future__ import annotations
import requests
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(snake_case ).json()
def UpperCamelCase_( snake_case : int = 1_0 ):
'''simple docstring'''
snake_case_ = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
snake_case_ = requests.get(snake_case ).json()[:max_stories]
return [get_hackernews_story(snake_case ) for story_id in story_ids]
def UpperCamelCase_( snake_case : int = 1_0 ):
'''simple docstring'''
snake_case_ = hackernews_top_stories(snake_case )
return "\n".join("* [{title}]({url})".format(**snake_case ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 85 | 0 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase = "roberta"
elif args.model_type == "gpt2":
UpperCamelCase = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase = "transformer"
UpperCamelCase = model.state_dict()
UpperCamelCase = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase = F'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase = F'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase = state_dict[param_name]
# Transformer Blocks #
UpperCamelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase = state_dict[
F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCamelCase = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase = state_dict[F'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase = state_dict[F'''lm_head.dense.{w}''']
UpperCamelCase = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase = state_dict[F'''{prefix}.ln_f.{w}''']
UpperCamelCase = state_dict["lm_head.weight"]
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 186 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _snake_case ( unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase_ : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
snake_case_ = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
snake_case_ = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
snake_case_ = text_generator("This is a test" , do_sample=a__ , num_return_sequences=2 , return_tensors=a__ )
self.assertEqual(
a__ , [
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
] , )
snake_case_ = text_generator.model.config.eos_token_id
snake_case_ = "<pad>"
snake_case_ = text_generator(
["This is a test", "This is a second test"] , do_sample=a__ , num_return_sequences=2 , batch_size=2 , return_tensors=a__ , )
self.assertEqual(
a__ , [
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
] , )
@require_tf
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
snake_case_ = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
snake_case_ = text_generator(["This is a test", "This is a second test"] , do_sample=a__ )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> str:
'''simple docstring'''
snake_case_ = TextGenerationPipeline(model=a__ , tokenizer=a__ )
return text_generator, ["This is a test", "Another test"]
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = "Hello I believe in"
snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
snake_case_ = text_generator(a__ )
self.assertEqual(
a__ , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
snake_case_ = text_generator(a__ , stop_sequence=" fe" )
self.assertEqual(a__ , [{"generated_text": "Hello I believe in fe"}] )
def lowerCAmelCase__ ( self , a__ , a__ ) -> Tuple:
'''simple docstring'''
snake_case_ = text_generator.model
snake_case_ = text_generator.tokenizer
snake_case_ = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
snake_case_ = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
snake_case_ = pipeline(task="text-generation" , model=a__ , tokenizer=a__ , return_full_text=a__ )
snake_case_ = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
snake_case_ = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
snake_case_ = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
snake_case_ = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_full_text=a__ , return_text=a__ )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_full_text=a__ , return_tensors=a__ )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_text=a__ , return_tensors=a__ )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
snake_case_ = text_generator("" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
snake_case_ = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
snake_case_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
snake_case_ = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(a__ ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
import torch
# Classic `model_kwargs`
snake_case_ = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
import torch
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
import torch
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=a__ , top_p=0.5 )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = "Hello world"
snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
snake_case_ = logging.get_logger("transformers.generation.tf_utils" )
else:
snake_case_ = logging.get_logger("transformers.generation.utils" )
snake_case_ = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_length=10 , max_new_tokens=1 )
self.assertIn(a__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_new_tokens=1 )
self.assertNotIn(a__ , cl.out )
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_length=10 )
self.assertNotIn(a__ , cl.out )
| 85 | 0 |
'''simple docstring'''
a__ : Dict = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
a__ : Tuple = concatenate_datasets
a__ : List[Any] = DownloadConfig
a__ : Dict = DownloadManager
a__ : List[str] = DownloadMode
a__ : Optional[Any] = DownloadConfig
a__ : List[Any] = DownloadMode
a__ : str = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 80 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case ( unittest.TestCase , ToolTesterMixin ):
    def setUp( self ):
        '''simple docstring'''
        self.tool = load_tool("text-classification" )
        self.tool.setup()
        self.remote_tool = load_tool("text-classification" , remote=True )
    def test_exact_match_arg( self ):
        '''simple docstring'''
        result = self.tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_arg_remote( self ):
        '''simple docstring'''
        result = self.remote_tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_kwarg( self ):
        '''simple docstring'''
        result = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_kwarg_remote( self ):
        '''simple docstring'''
        result = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
| 85 | 0 |
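A short usage sketch of the tool exercised by the tests above; this assumes network access to download the underlying model, and the input text and labels are arbitrary examples.
from transformers import load_tool

tool = load_tool("text-classification")
tool.setup()
label = tool("This movie was a waste of time", ["positive", "negative"])
print(label)  # one of the two candidate labels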
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class _snake_case ( PreTrainedModel ):
    '''simple docstring'''
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__( self , config: CLIPConfig ):
        super().__init__(config )
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config )
        self.p_head = nn.Linear(config.vision_config.projection_dim , 1 )
        self.w_head = nn.Linear(config.vision_config.projection_dim , 1 )
    @torch.no_grad()
    def forward( self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        image_embeds = self.vision_model(clip_input )[0]
        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected ):
            logger.warning(
                """Potential NSFW content was detected in one or more images. A black image will be returned instead."""
                """ Try again with a different prompt and/or seed.""" )
        for idx, nsfw_detected_ in enumerate(nsfw_detected ):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape )
        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected ):
            logger.warning(
                """Potential watermarked content was detected in one or more images. A black image will be returned instead."""
                """ Try again with a different prompt and/or seed.""" )
        for idx, watermark_detected_ in enumerate(watermark_detected ):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 345 |
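The checker above applies the same flatten / threshold / blackout sequence to both heads. A standalone numpy sketch of that pattern, with illustrative shapes and a 0.5 threshold:
import numpy as np

def blackout_flagged(images, scores, threshold=0.5):
    # scores: one score per image; True means "replace this image with black"
    flagged = scores.flatten() > threshold
    for idx, hit in enumerate(flagged.tolist()):
        if hit:
            images[idx] = np.zeros(images[idx].shape)
    return images, flagged.tolist()

imgs, flags = blackout_flagged(np.ones((2, 8, 8, 3)), np.array([0.9, 0.1]))
print(flags)  # [True, False]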
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class BridgeTowerVisionConfig ( PretrainedConfig ):
    model_type = "bridgetower_vision_model"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1e-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("model_type" ) == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig ( PretrainedConfig ):
    model_type = "bridgetower_text_model"
    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("model_type" ) == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerConfig ( PretrainedConfig ):
    model_type = "bridgetower"
    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1e-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
        '''simple docstring'''
        _ = kwargs.pop("text_config_dict" , None )
        _ = kwargs.pop("vision_config_dict" , None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 85 | 0 |
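A quick usage sketch of the composite config above: sub-configs default when omitted, and to_dict re-expands them; printed values reflect the defaults shown in the class definitions.
config = BridgeTowerConfig()
d = config.to_dict()
print(d["model_type"])                    # "bridgetower"
print(d["text_config"]["hidden_size"])    # 768 by default
print(d["vision_config"]["hidden_size"])  # 768 by default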
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class a_ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = StableDiffusionLDMaDPipeline
UpperCamelCase = TEXT_TO_IMAGE_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=a__ , set_alpha_to_one=a__ , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_SCREAMING_SNAKE_CASE = CLIPTextModel(a__ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
_SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline(**a__ )
_SCREAMING_SNAKE_CASE = ldmad_pipe.to(a__ )
ldmad_pipe.set_progress_bar_config(disable=a__ )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a__ )
_SCREAMING_SNAKE_CASE = ldmad_pipe(**a__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = output.rgb, output.depth
_SCREAMING_SNAKE_CASE = rgb[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_SCREAMING_SNAKE_CASE = np.array(
[0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] )
_SCREAMING_SNAKE_CASE = np.array([103.4_6727, 85.81_2004, 87.84_9236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline(**a__ )
_SCREAMING_SNAKE_CASE = ldmad_pipe.to(a__ )
ldmad_pipe.set_progress_bar_config(disable=a__ )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a__ )
_SCREAMING_SNAKE_CASE = 3 * [inputs["""prompt"""]]
# forward
_SCREAMING_SNAKE_CASE = ldmad_pipe(**a__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = output.rgb, output.depth
_SCREAMING_SNAKE_CASE = rgb_slice_a[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = depth_slice_a[0, -3:, -1]
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a__ )
_SCREAMING_SNAKE_CASE = 3 * [inputs.pop("""prompt""" )]
_SCREAMING_SNAKE_CASE = ldmad_pipe.tokenizer(
a__ , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=a__ , return_tensors="""pt""" , )
_SCREAMING_SNAKE_CASE = text_inputs["""input_ids"""].to(a__ )
_SCREAMING_SNAKE_CASE = ldmad_pipe.text_encoder(a__ )[0]
_SCREAMING_SNAKE_CASE = prompt_embeds
# forward
_SCREAMING_SNAKE_CASE = ldmad_pipe(**a__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = output.rgb, output.depth
_SCREAMING_SNAKE_CASE = rgb_slice_a[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=a__ )
_SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline(**a__ )
_SCREAMING_SNAKE_CASE = ldmad_pipe.to(a__ )
ldmad_pipe.set_progress_bar_config(disable=a__ )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a__ )
_SCREAMING_SNAKE_CASE = """french fries"""
_SCREAMING_SNAKE_CASE = ldmad_pipe(**a__ , negative_prompt=a__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = output.rgb, output.depth
_SCREAMING_SNAKE_CASE = rgb[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_SCREAMING_SNAKE_CASE = np.array(
[0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] )
_SCREAMING_SNAKE_CASE = np.array([107.8_4738, 84.6_2802, 89.96_2135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_( self , A , A="cpu" , A=torch.floataa , A=0 ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = torch.Generator(device=a__ ).manual_seed(a__ )
_SCREAMING_SNAKE_CASE = np.random.RandomState(a__ ).standard_normal((1, 4, 64, 64) )
_SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ).to(device=a__ , dtype=a__ )
_SCREAMING_SNAKE_CASE = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
_SCREAMING_SNAKE_CASE = ldmad_pipe.to(a__ )
ldmad_pipe.set_progress_bar_config(disable=a__ )
_SCREAMING_SNAKE_CASE = self.get_inputs(a__ )
_SCREAMING_SNAKE_CASE = ldmad_pipe(**a__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = output.rgb, output.depth
_SCREAMING_SNAKE_CASE = rgb[0, -3:, -3:, -1].flatten()
_SCREAMING_SNAKE_CASE = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
_SCREAMING_SNAKE_CASE = np.array(
[0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
_SCREAMING_SNAKE_CASE = np.array(
[0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_( self , A , A="cpu" , A=torch.floataa , A=0 ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = torch.Generator(device=a__ ).manual_seed(a__ )
_SCREAMING_SNAKE_CASE = np.random.RandomState(a__ ).standard_normal((1, 4, 64, 64) )
_SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ).to(device=a__ , dtype=a__ )
_SCREAMING_SNAKE_CASE = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(a__ )
ldmad_pipe.set_progress_bar_config(disable=a__ )
_SCREAMING_SNAKE_CASE = self.get_inputs(a__ )
_SCREAMING_SNAKE_CASE = ldmad_pipe(**a__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = output.rgb, output.depth
_SCREAMING_SNAKE_CASE = 0.49_5586
_SCREAMING_SNAKE_CASE = 0.3379_5515
_SCREAMING_SNAKE_CASE = 112.4_8518
_SCREAMING_SNAKE_CASE = 98.48_9746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(a__ )
ldmad_pipe.set_progress_bar_config(disable=a__ )
_SCREAMING_SNAKE_CASE = self.get_inputs(a__ )
_SCREAMING_SNAKE_CASE = ldmad_pipe(**a__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = output.rgb, output.depth
_SCREAMING_SNAKE_CASE = 0.419_4127
_SCREAMING_SNAKE_CASE = 0.3537_5586
_SCREAMING_SNAKE_CASE = 0.563_8502
_SCREAMING_SNAKE_CASE = 0.3468_6103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 58 |
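A compact sketch of the device-aware seeding idiom that get_dummy_inputs and get_inputs rely on above; torch.Generator objects cannot live on the "mps" backend, hence the CPU fallback.
import torch

def make_generator(device, seed=0):
    # torch.manual_seed returns the (seeded) default CPU generator
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=42)
print(torch.randn(2, generator=gen))  # reproducible across runs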
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase_( snake_case : list[int] ):
'''simple docstring'''
return len(set(snake_case ) ) == len(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 0 |
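Two quick checks of the set-length uniqueness predicate defined above, calling it by the name it carries in this dump:
assert UpperCamelCase_([1, 2, 3, 4])   # all elements distinct -> True
assert not UpperCamelCase_([1, 2, 2])  # duplicated 2 -> False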
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _a ( unittest.TestCase ):
    def tearDown(self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet(self ):
torch.manual_seed(0 )
UpperCAmelCase_: str = UNetaDModel(
sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""AttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """AttnUpBlock2D"""), )
return model
@property
    def dummy_unet_condition(self ):
torch.manual_seed(0 )
UpperCAmelCase_: List[str] = UNetaDConditionModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D"""), cross_attention_dim=10, )
return model
@property
    def dummy_vqvae_and_unet(self ):
torch.manual_seed(0 )
UpperCAmelCase_: List[str] = AutoencoderKL(
sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D"""), up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D"""), )
UpperCAmelCase_: List[Any] = UNetaDModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""AttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """AttnUpBlock2D"""), )
return vqvae, unet
@slow
    def test_audio_diffusion(self ):
UpperCAmelCase_: Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
UpperCAmelCase_: int = DDPMScheduler()
UpperCAmelCase_: List[Any] = AudioDiffusionPipeline(vqvae=a__, unet=self.dummy_unet, mel=a__, scheduler=a__ )
UpperCAmelCase_: Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
UpperCAmelCase_: str = torch.Generator(device=a__ ).manual_seed(42 )
UpperCAmelCase_: int = pipe(generator=a__, steps=4 )
UpperCAmelCase_: Union[str, Any] = output.audios[0]
UpperCAmelCase_: Optional[int] = output.images[0]
UpperCAmelCase_: List[Any] = torch.Generator(device=a__ ).manual_seed(42 )
UpperCAmelCase_: Tuple = pipe(generator=a__, steps=4, return_dict=a__ )
UpperCAmelCase_: Optional[Any] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCAmelCase_: List[str] = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Optional[int] = np.frombuffer(image_from_tuple.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Optional[Any] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_: Dict = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
UpperCAmelCase_: str = DDIMScheduler()
UpperCAmelCase_: str = self.dummy_vqvae_and_unet
UpperCAmelCase_: str = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=a__, scheduler=a__ )
UpperCAmelCase_: Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
np.random.seed(0 )
UpperCAmelCase_: Tuple = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCAmelCase_: List[str] = torch.Generator(device=a__ ).manual_seed(42 )
UpperCAmelCase_: List[str] = pipe(raw_audio=a__, generator=a__, start_step=5, steps=10 )
UpperCAmelCase_: Dict = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCAmelCase_: Union[str, Any] = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Dict = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_: List[str] = self.dummy_unet_condition
UpperCAmelCase_: Optional[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=a__, mel=a__, scheduler=a__ )
UpperCAmelCase_: Union[str, Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
np.random.seed(0 )
UpperCAmelCase_: Union[str, Any] = torch.rand((1, 1, 10) )
UpperCAmelCase_: str = pipe(generator=a__, encoding=a__ )
UpperCAmelCase_: Union[str, Any] = output.images[0]
UpperCAmelCase_: Union[str, Any] = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
    def tearDown(self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion(self ):
UpperCAmelCase_: Optional[int] = torch_device
UpperCAmelCase_: Optional[Any] = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
UpperCAmelCase_: Any = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
UpperCAmelCase_: Optional[int] = torch.Generator(device=a__ ).manual_seed(42 )
UpperCAmelCase_: Dict = pipe(generator=a__ )
UpperCAmelCase_: List[Any] = output.audios[0]
UpperCAmelCase_: int = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCAmelCase_: Tuple = np.frombuffer(image.tobytes(), dtype="""uint8""" )[:10]
UpperCAmelCase_: Optional[Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 147 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
_SCREAMING_SNAKE_CASE : Optional[Any] = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class _snake_case :
    def __init__( self , prompt = None , choices = [] ):
        '''simple docstring'''
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
def lowerCAmelCase__ ( self , a__ , a__ = "" ) -> int:
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , a__ )
else:
forceWrite(self.choices[index] , a__ )
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(a__ )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
    def move_direction( self , direction , num_spaces = 1 ):
        '''simple docstring'''
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
    def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
    def lowerCAmelCase__ ( self ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
        index = int(chr(self.current_selection ) )
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
else:
return
else:
return
    def run( self , default_choice = 0 ):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        self.position = default_choice
for i in range(len(self.choices ) ):
            self.print_choice(i )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(a__ , "\n" )
return choice
| 85 | 0 |
def solution( numerator : int = 3 , denominator : int = 7 , limit : int = 1_000_000 ) -> int:
    '''simple docstring'''
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
| 140 |
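A small worked check of the search above: restricting denominators to d <= 8, the fraction immediately to the left of 3/7 is 2/5, so the returned numerator is 2.
assert solution(numerator=3, denominator=7, limit=8) == 2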
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase_( snake_case : Optional[int] ):
'''simple docstring'''
return EnvironmentCommand()
class _snake_case ( BaseDiffusersCLICommand ):
@staticmethod
    def register_subcommand( parser ):
        '''simple docstring'''
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=UpperCamelCase_ )
    def run( self ):
'''simple docstring'''
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
return info
@staticmethod
    def format_dict( d ) -> str:
'''simple docstring'''
return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
    data: int
    left: lowerCAmelCase__ | None = None
    right: lowerCAmelCase__ | None = None
CoinsDistribResult = namedtuple("""CoinsDistribResult""", """moves excess""")
def _A (root ) -> int:
    """simple docstring"""
    if root is None:
        return 0
    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('''The nodes number should be same as the number of coins''' )
    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91 |
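A tiny check of the coin-distribution routine above: a root holding 3 coins with two empty leaves needs one move per edge, i.e. 2 moves in total.
root = lowerCAmelCase__(3, lowerCAmelCase__(0), lowerCAmelCase__(0))
assert _A(root) == 2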
'''simple docstring'''
import os
_SCREAMING_SNAKE_CASE : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals( numerals : str ):
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals( num : int ):
    '''simple docstring'''
    numerals = ""
    m_count = num // 1_0_0_0
numerals += m_count * "M"
num %= 1_0_0_0
    c_count = num // 1_0_0
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
    x_count = num // 1_0
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCamelCase_( snake_case : str = "/p089_roman.txt" ):
'''simple docstring'''
snake_case_ = 0
with open(os.path.dirname(snake_case ) + roman_numerals_filename ) as filea:
snake_case_ = filea.readlines()
for line in lines:
snake_case_ = line.strip()
snake_case_ = parse_roman_numerals(snake_case )
snake_case_ = generate_roman_numerals(snake_case )
savings += len(snake_case ) - len(snake_case )
return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 85 | 0 |
def __SCREAMING_SNAKE_CASE ( input_num ) -> int:
    '''simple docstring'''
    if not isinstance(input_num , int ):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 273 |
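Two doctest-style checks of the proper-divisor sum above: 6 and 28 are perfect numbers, so each equals the sum of its proper divisors.
assert __SCREAMING_SNAKE_CASE(6) == 6    # 1 + 2 + 3
assert __SCREAMING_SNAKE_CASE(28) == 28  # 1 + 2 + 4 + 7 + 14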
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE : int = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ["CLIPFeatureExtractor"]
_SCREAMING_SNAKE_CASE : Dict = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
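The _LazyModule wiring above defers the heavy framework imports until an attribute is first accessed. A minimal sketch of the same idea using a module-level __getattr__ (PEP 562); the attribute-to-module mapping is a made-up example.
import importlib

_LAZY_ATTRS = {"json_tools": "json", "math_tools": "math"}  # hypothetical mapping

def __getattr__(name):
    # invoked only when normal module lookup fails, so the import runs on first access
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        globals()[name] = module  # cache so later lookups bypass this hook
        return module
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")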
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class snake_case__ ( PretrainedConfig):
    model_type = "upernet"
    def __init__( self , backbone_config=None , hidden_size=5_12 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=3_84 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=2_55 , **kwargs , ):
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 304 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
_SCREAMING_SNAKE_CASE : int = {
"gpt-neox-20b": 2048,
}
class _snake_case ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , a__=None , a__=None , a__=None , a__="<|endoftext|>" , a__="<|endoftext|>" , a__="<|endoftext|>" , a__=False , **a__ , ) -> Tuple:
'''simple docstring'''
super().__init__(
a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , add_prefix_space=a__ , **a__ , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space:
snake_case_ = getattr(a__ , pre_tok_state.pop("type" ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**a__ )
snake_case_ = add_prefix_space
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
def lowerCAmelCase__ ( self , a__ ) -> List[int]:
'''simple docstring'''
snake_case_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] )
if len(a__ ) > self.model_max_length:
snake_case_ = input_ids[-self.model_max_length :]
return input_ids
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication( a , b ) ->list:
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception('Matrices are not 2x2' )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition( matrix_a , matrix_b ) ->list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction( matrix_a , matrix_b ) ->list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix( a ) ->tuple:
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception('Odd matrices are not supported!' )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions( matrix ) ->tuple:
    return len(matrix ), len(matrix[0] )
def print_matrix( matrix ) ->None:
    print('\n'.join(str(line ) for line in matrix ) )
def actual_strassen( matrix_a , matrix_b ) ->list:
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a , b , c , d = split_matrix(matrix_a )
    e , f , g , h = split_matrix(matrix_b )
    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def strassen( matrix1 , matrix2 ) ->list:
    if matrix_dimensions(matrix1 )[1] != matrix_dimensions(matrix2 )[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            F'Matrix A: {matrix1}\n'
            F'Matrix B: {matrix2}'
        )
        raise Exception(msg )
    dimension1 = matrix_dimensions(matrix1 )
    dimension2 = matrix_dimensions(matrix2 )
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1 , *dimension2 )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension1[1] , maxim ):
                new_matrix1[i].append(0 )
        else:
            new_matrix1.append([0] * maxim )
        if i < dimension2[0]:
            for _ in range(dimension2[1] , maxim ):
                new_matrix2[i].append(0 )
        else:
            new_matrix2.append([0] * maxim )
    final_matrix = actual_strassen(new_matrix1 , new_matrix2 )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension2[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 290 |
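A brute-force cross-check of the seven-product recursion above: for small power-of-two matrices, actual_strassen must agree with the naive O(n^3) triple loop.
import random

def naive_multiply(a, b):
    n, m, p = len(a), len(b), len(b[0])
    return [[sum(a[i][k] * b[k][j] for k in range(m)) for j in range(p)] for i in range(n)]

random.seed(0)
x = [[random.randint(0, 9) for _ in range(4)] for _ in range(4)]
y = [[random.randint(0, 9) for _ in range(4)] for _ in range(4)]
assert actual_strassen(x, y) == naive_multiply(x, y)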
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ):
    '''simple docstring'''
    locka = FileLock(str(tmpdir / "foo.lock" ) )
    lockb = FileLock(str(tmpdir / "foo.lock" ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
            assert time.time() - _start > timeout
def test_long_path( tmpdir ):
    '''simple docstring'''
    filename = "a" * 1_0_0_0 + ".lock"
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(".lock" )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
| 85 | 0 |
from manim import *
class A__ ( Scene ):
"""simple docstring"""
    def construct( self):
'''simple docstring'''
        mem = Rectangle(height=0.5 , width=0.5)
        meta_mem = Rectangle(height=0.25 , width=0.25)
        fill = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ : List[Any] = [mem.copy() for i in range(6)]
a__ : int = [mem.copy() for i in range(6)]
a__ : Optional[int] = VGroup(*a__).arrange(a__ , buff=0)
a__ : Optional[Any] = VGroup(*a__).arrange(a__ , buff=0)
a__ : Optional[Any] = VGroup(a__ , a__).arrange(a__ , buff=0)
a__ : Union[str, Any] = Text('CPU' , font_size=24)
a__ : Dict = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
cpu.move_to([-2.5, -0.5, 0])
self.add(a__)
a__ : str = [mem.copy() for i in range(4)]
a__ : str = VGroup(*a__).arrange(a__ , buff=0)
a__ : Tuple = Text('GPU' , font_size=24)
a__ : Tuple = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
gpu.move_to([-1, -1, 0])
self.add(a__)
a__ : List[Any] = [mem.copy() for i in range(6)]
a__ : str = VGroup(*a__).arrange(a__ , buff=0)
a__ : Dict = Text('Model' , font_size=24)
a__ : List[Any] = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
model.move_to([3, -1.0, 0])
self.add(a__)
a__ : Optional[int] = []
a__ : int = []
a__ : Any = []
for i, rect in enumerate(a__):
rect.set_stroke(a__)
a__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(a__ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=a__)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a__ , buff=0.0)
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a__ , buff=0.0)
self.add(a__)
model_cpu_arr.append(a__)
self.add(*a__ , *a__ , *a__)
a__ : str = [mem.copy() for i in range(6)]
a__ : Tuple = VGroup(*a__).arrange(a__ , buff=0)
a__ : str = Text('Loaded Checkpoint' , font_size=24)
a__ : Tuple = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
checkpoint.move_to([3, 0.5, 0])
self.add(a__)
a__ : Dict = []
a__ : Optional[Any] = []
for i, rect in enumerate(a__):
a__ : List[str] = fill.copy().set_fill(a__ , opacity=0.7)
target.move_to(a__)
ckpt_arr.append(a__)
a__ : Optional[int] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.move_to(cpu_right_col_base[i - 5])
ckpt_cpu_arr.append(a__)
self.add(*a__ , *a__)
a__ : int = Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ : Dict = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(a__ , a__)
a__ : int = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left())
self.add(a__)
a__ : Optional[Any] = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0])
a__ : Tuple = [meta_mem.copy() for i in range(6)]
a__ : str = [meta_mem.copy() for i in range(6)]
a__ : Optional[Any] = VGroup(*a__).arrange(a__ , buff=0)
a__ : List[Any] = VGroup(*a__).arrange(a__ , buff=0)
a__ : Tuple = VGroup(a__ , a__).arrange(a__ , buff=0)
a__ : List[str] = Text('Disk' , font_size=24)
a__ : List[str] = Group(a__ , a__).arrange(a__ , buff=0.5 , aligned_edge=a__)
disk.move_to([-4.0, -1.25, 0])
self.play(Write(a__ , run_time=3) , Write(a__ , run_time=1) , Create(a__ , run_time=1))
a__ : Any = []
for i, rect in enumerate(a__):
a__ : Optional[int] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i]).scale(0.5)
animations.append(MoveToTarget(a__ , run_time=1.5))
self.play(*a__)
self.play(FadeOut(a__))
a__ : Any = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24)
step_a.move_to([2, 2, 0])
self.play(Write(a__ , run_time=3))
self.play(
FadeOut(a__ , a__ , *a__ , *a__) , )
self.wait()
| 99 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig ( datasets.BuilderConfig ):
    features : Optional[datasets.Features] = None
def UpperCamelCase_( snake_case : "pyspark.sql.DataFrame" , snake_case : List[int] , ):
'''simple docstring'''
import pyspark
def generate_fn():
snake_case_ = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
snake_case_ = df_with_partition_id.select("*" ).where(f'part_id = {partition_id}' ).drop("part_id" )
snake_case_ = partition_df.collect()
snake_case_ = 0
for row in rows:
yield f'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable ( _BaseExamplesIterable ):
    def __init__( self , df , partition_order=None , ):
        '''simple docstring'''
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ) -> Union[str, Any]:
'''simple docstring'''
yield from self.generate_examples_fn()
def lowerCAmelCase__ ( self , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
def lowerCAmelCase__ ( self , a__ , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = self.split_shard_indices_by_worker(a__ , a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
@property
    def n_shards( self ) -> int:
'''simple docstring'''
return len(self.partition_order )
class _snake_case ( datasets.DatasetBuilder ):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__( self , df , cache_dir = None , working_dir = None , **kwargs , ):
        '''simple docstring'''
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **kwargs , )
    def _validate_cache_dir( self ):
        '''simple docstring'''
        def create_cache_and_write_probe(context ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , "fs_test" + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , "a" )
            return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
    def _info( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(a__ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
        approx_total_size = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single( self , fpath , file_format , max_shard_size , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        '''simple docstring'''
        import pyspark
        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
snake_case_ = 0
snake_case_ = writer_class(
features=a__ , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([first_batch] )
writer.write_table(a__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
snake_case_ = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([batch] )
writer.write_table(a__ )
if writer._num_bytes > 0:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a__ ) ):
snake_case_ = os.path.join(os.path.dirname(a__ ) , os.path.basename(a__ ) )
shutil.move(a__ , a__ )
snake_case_ = (
self.df.mapInArrow(a__ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCAmelCase__ ( self , a__ , a__ = "arrow" , a__ = None , a__ = None , **a__ , ) -> int:
'''simple docstring'''
self._validate_cache_dir()
snake_case_ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a__ )
snake_case_ = not is_remote_filesystem(self._fs )
snake_case_ = os.path.join if is_local else posixpath.join
snake_case_ = "-TTTTT-SSSSS-of-NNNNN"
snake_case_ = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
snake_case_ = path_join(self._output_dir , a__ )
snake_case_ = 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = []
snake_case_ = []
for task_id, content in self._prepare_split_single(a__ , a__ , a__ ):
snake_case_ , snake_case_ , snake_case_ , snake_case_ = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a__ )
snake_case_ = total_num_examples
snake_case_ = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
snake_case_ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
snake_case_ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a__ , a__ , a__ , ):
rename(
a__ , fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace("TTTTT-SSSSS" , F'{global_shard_id:05d}' ).replace("NNNNN" , F'{total_shards:05d}' ) , )
snake_case_ = []
snake_case_ = 0
for i in range(len(a__ ) ):
snake_case_ , snake_case_ = task_id_and_num_shards[i]
for shard_id in range(a__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a__ , len(a__ ) ).map(lambda a__ : _rename_shard(*a__ ) ).collect()
else:
# don't use any pattern
snake_case_ = 0
snake_case_ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace(a__ , "" ) , )
def lowerCAmelCase__ ( self , a__ , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
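# A minimal sketch of the shard-size arithmetic used in the repartitioning step
# above, outside Spark. Names here are illustrative, not part of the builder's
# API: sample up to 100 rows, average their Arrow-encoded size, and derive a
# partition count that keeps each shard under max_shard_size.
def estimate_num_partitions(df_num_rows: int, sample_bytes: int, sample_num_rows: int, max_shard_size: int) -> int:
    approx_bytes_per_row = sample_bytes / sample_num_rows
    approx_total_size = approx_bytes_per_row * df_num_rows
    if approx_total_size <= max_shard_size:
        return 1  # keep the current partitioning when everything fits in one shard
    # at least one row per partition, mirroring min(df_num_rows, ...) above
    return min(df_num_rows, int(approx_total_size / max_shard_size))

assert estimate_num_partitions(1_000_000, sample_bytes=10_000, sample_num_rows=100, max_shard_size=500 << 20) == 1
assert estimate_num_partitions(1_000_000, sample_bytes=10_000, sample_num_rows=100, max_shard_size=10 << 20) == 9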
| 85 | 0 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _snake_case ( self )->Any:
'''simple docstring'''
torch.manual_seed(0 )
A_ : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : str = self.dummy_uncond_unet
A_ : Union[str, Any] = KarrasVeScheduler()
A_ : List[str] = KarrasVePipeline(unet=a__ , scheduler=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A_ : Any = torch.manual_seed(0 )
A_ : Optional[Any] = pipe(num_inference_steps=2 , generator=a__ , output_type='''numpy''' ).images
A_ : List[str] = torch.manual_seed(0 )
A_ : int = pipe(num_inference_steps=2 , generator=a__ , output_type='''numpy''' , return_dict=a__ )[0]
A_ : List[Any] = image[0, -3:, -3:, -1]
A_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A_ : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->int:
'''simple docstring'''
A_ : List[str] = '''google/ncsnpp-celebahq-256'''
A_ : Union[str, Any] = UNetaDModel.from_pretrained(a__ )
A_ : List[Any] = KarrasVeScheduler()
A_ : Tuple = KarrasVePipeline(unet=a__ , scheduler=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A_ : Dict = torch.manual_seed(0 )
A_ : Tuple = pipe(num_inference_steps=20 , generator=a__ , output_type='''numpy''' ).images
A_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
A_ : Union[str, Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
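# The assertions above hinge on seeded determinism: runs that share a
# torch.Generator seed must produce bitwise-identical samples. A minimal,
# pipeline-free check of that property:
import torch

def seeded_noise(seed: int, shape=(1, 3, 32, 32)) -> torch.Tensor:
    generator = torch.manual_seed(seed)
    return torch.randn(shape, generator=generator)

assert torch.equal(seeded_noise(0), seeded_noise(0))      # same seed, same sample
assert not torch.equal(seeded_noise(0), seeded_noise(1))  # different seed, different sample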
| 186 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_SCREAMING_SNAKE_CASE : List[str] = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
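# A stripped-down sketch of the lazy-import idea behind `_LazyModule`:
# attribute access triggers the submodule import, so heavy backends are only
# loaded when actually used. Illustrative names, not the real implementation.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_submodule:
            raise AttributeError(symbol)
        module = importlib.import_module("." + self._symbol_to_submodule[symbol], self.__name__)
        return getattr(module, symbol)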
| 85 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def _UpperCamelCase ( __A , __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = list(__A )
UpperCamelCase__ = list(__A )
UpperCamelCase__ = 0
for i in range(len(__A ) ):
if lista[i] != lista[i]:
count += 1
UpperCamelCase__ = "_"
if count > 1:
return False
else:
return "".join(__A )
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = []
while True:
UpperCamelCase__ = ["$"] * len(__A )
UpperCamelCase__ = []
for i in range(len(__A ) ):
for j in range(i + 1 , len(__A ) ):
UpperCamelCase__ = compare_string(binary[i] , binary[j] )
if k is False:
UpperCamelCase__ = "*"
UpperCamelCase__ = "*"
temp.append("X" )
for i in range(len(__A ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__A ) == 0:
return pi
UpperCamelCase__ = list(set(__A ) )
def _UpperCamelCase ( __A , __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = []
for minterm in minterms:
UpperCamelCase__ = ""
for _ in range(__A ):
UpperCamelCase__ = str(minterm % 2 ) + string
minterm //= 2
temp.append(__A )
return temp
def _UpperCamelCase ( __A , __A , __A ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = list(__A )
UpperCamelCase__ = list(__A )
UpperCamelCase__ = 0
for i in range(len(__A ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def _UpperCamelCase ( __A , __A ) -> str:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = [0] * len(__A )
for i in range(len(chart[0] ) ):
UpperCamelCase__ = 0
UpperCamelCase__ = -1
for j in range(len(__A ) ):
if chart[j][i] == 1:
count += 1
UpperCamelCase__ = j
if count == 1:
UpperCamelCase__ = 1
for i in range(len(__A ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__A ) ):
UpperCamelCase__ = 0
temp.append(prime_implicants[i] )
while True:
UpperCamelCase__ = 0
UpperCamelCase__ = -1
UpperCamelCase__ = 0
for i in range(len(__A ) ):
UpperCamelCase__ = chart[i].count(1 )
if count_n > max_n:
UpperCamelCase__ = count_n
UpperCamelCase__ = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__A ) ):
UpperCamelCase__ = 0
def _UpperCamelCase ( __A , __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = [[0 for x in range(len(__A ) )] for x in range(len(__A ) )]
for i in range(len(__A ) ):
UpperCamelCase__ = prime_implicants[i].count("_" )
for j in range(len(__A ) ):
if is_for_table(prime_implicants[i] , binary[j] , __A ):
UpperCamelCase__ = 1
return chart
def _UpperCamelCase ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = int(input("Enter the no. of variables\n" ) )
UpperCamelCase__ = [
int(__A )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
UpperCamelCase__ = decimal_to_binary(__A , __A )
UpperCamelCase__ = check(__A )
print("Prime Implicants are:" )
print(__A )
UpperCamelCase__ = prime_implicant_chart(__A , __A )
UpperCamelCase__ = selection(__A , __A )
print("Essential Prime Implicants are:" )
print(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
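# A small worked example of the minterm-to-binary step above, with illustrative
# names (the helpers in this file all share one obfuscated name, so they cannot
# be called directly):
def minterm_to_binary(no_of_variables: int, minterms: list) -> list:
    out = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variables):
            string = str(minterm % 2) + string
            minterm //= 2
        out.append(string)
    return out

assert minterm_to_binary(3, [0, 1, 2, 5, 6, 7]) == ["000", "001", "010", "101", "110", "111"]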
| 80 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return 12
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(a__ )
@property
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = 12
snake_case_ = 12
snake_case_ = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
snake_case_ = TransformeraDModel(**a__ )
return model
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = "cpu"
snake_case_ = self.dummy_vqvae
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_transformer
snake_case_ = VQDiffusionScheduler(self.num_embed )
snake_case_ = LearnedClassifierFreeSamplingEmbeddings(learnable=a__ )
snake_case_ = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = "teddy bear playing in the pool"
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" )
snake_case_ = output.images
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe(
[prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = "cpu"
snake_case_ = self.dummy_vqvae
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_transformer
snake_case_ = VQDiffusionScheduler(self.num_embed )
snake_case_ = LearnedClassifierFreeSamplingEmbeddings(
learnable=a__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = "teddy bear playing in the pool"
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" )
snake_case_ = output.images
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe(
[prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
snake_case_ = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
snake_case_ = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=a__ , output_type="np" , )
snake_case_ = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 85 | 0 |
from __future__ import annotations
import requests
def lowerCamelCase_ ( _a : str ):
'''simple docstring'''
UpperCAmelCase_ : int = F'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_a ).json()
def lowerCamelCase_ ( _a : int = 10 ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
UpperCAmelCase_ : Any = requests.get(_a ).json()[:max_stories]
return [get_hackernews_story(_a ) for story_id in story_ids]
def lowerCamelCase_ ( _a : int = 10 ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = hackernews_top_stories(_a )
return "\n".join("""* [{title}]({url})""".format(**_a ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 345 |
'''simple docstring'''
from statistics import mean, stdev
def UpperCamelCase_( snake_case : list , snake_case : int = 3 ):
'''simple docstring'''
snake_case_ = min(snake_case )
snake_case_ = max(snake_case )
# normalize data
return [round((x - x_min) / (x_max - x_min) , snake_case ) for x in data]
def UpperCamelCase_( snake_case : list , snake_case : int = 3 ):
'''simple docstring'''
snake_case_ = mean(snake_case )
snake_case_ = stdev(snake_case )
# standardize data
return [round((x - mu) / (sigma) , snake_case ) for x in data]
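# A quick numeric check of the two rescalings above: min-max normalization maps
# the data into [0, 1], while standardization centres it at mean 0 with unit
# sample standard deviation (which is what `statistics.stdev` computes).
from statistics import mean, stdev

data = [2.0, 4.0, 6.0, 8.0, 10.0]
normalized = [round((x - min(data)) / (max(data) - min(data)), 3) for x in data]
assert normalized == [0.0, 0.25, 0.5, 0.75, 1.0]
standardized = [round((x - mean(data)) / stdev(data), 3) for x in data]  # mean 6, stdev ~3.162
assert standardized == [-1.265, -0.632, 0.0, 0.632, 1.265]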
| 85 | 0 |
'''simple docstring'''
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class a_ ( lowercase_ ):
'''simple docstring'''
UpperCamelCase = "facebook/bart-large-mnli"
UpperCamelCase = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
UpperCamelCase = "text_classifier"
UpperCamelCase = AutoTokenizer
UpperCamelCase = AutoModelForSequenceClassification
UpperCamelCase = ["text", ["text"]]
UpperCamelCase = ["text"]
def snake_case_( self ) -> Any:
super().setup()
_SCREAMING_SNAKE_CASE = self.model.config
_SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
_SCREAMING_SNAKE_CASE = int(a__ )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def snake_case_( self , A , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(a__ ) , [f'This example is {label}' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def snake_case_( self , A ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = outputs.logits
_SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
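# The tool above recasts zero-shot classification as NLI: every candidate label
# becomes the hypothesis "This example is <label>", and the label whose pair
# scores highest on the entailment logit wins. A framework-free sketch of that
# final selection step (the logits here are made up for illustration):
entailment_logits = {"positive": 3.1, "negative": -1.2, "neutral": 0.4}
best_label = max(entailment_logits, key=entailment_logits.get)
assert best_label == "positive"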
| 58 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 85 | 0 |
import string
def lowerCAmelCase_ (lowerCAmelCase__: str ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = """"""
for i in sequence:
UpperCAmelCase_: int = ord(lowerCAmelCase__ )
if 6_5 <= extract <= 9_0:
output += chr(1_5_5 - extract )
elif 9_7 <= extract <= 1_2_2:
output += chr(2_1_9 - extract )
else:
output += i
return output
def lowerCAmelCase_ (lowerCAmelCase__: str ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = string.ascii_letters
UpperCAmelCase_: Tuple = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowerCAmelCase__ )] if c in letters else c for c in sequence )
def lowerCAmelCase_ ():
"""simple docstring"""
from timeit import timeit
print("""Running performance benchmarks...""" )
UpperCAmelCase_: Union[str, Any] = """from string import printable ; from __main__ import atbash, atbash_slow"""
print(F'> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowerCAmelCase__ )} seconds' )
print(F'> atbash(): {timeit("atbash(printable)" , setup=lowerCAmelCase__ )} seconds' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
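# Atbash maps each letter onto its mirror in the alphabet (A<->Z, B<->Y, ...),
# so the cipher is its own inverse. A self-contained check of that property,
# with an illustrative name since the helpers above share one obfuscated name:
import string

def atbash_example(text: str) -> str:
    letters = string.ascii_letters
    mirrored = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(mirrored[letters.index(c)] if c in letters else c for c in text)

assert atbash_example("ABCDEFGH") == "ZYXWVUTS"
assert atbash_example(atbash_example("with space")) == "with space"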
| 147 |
'''simple docstring'''
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[int] ):
'''simple docstring'''
snake_case_ = [0 for i in range(r + 1 )]
# nc0 = 1
snake_case_ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
snake_case_ = min(snake_case , snake_case )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
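# The helper above fills Pascal's triangle row by row in a single rolling array:
# updating c[j] += c[j-1] from right to left reuses the previous row, so the
# whole computation needs only O(r) space. A quick check with an illustrative
# wrapper name:
def binomial(n: int, r: int) -> int:
    c = [0] * (r + 1)
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]

assert binomial(10, 5) == 252
assert binomial(5, 0) == 1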
| 85 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 140 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Tuple = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
_SCREAMING_SNAKE_CASE : List[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
"""simple docstring"""
import re
def _A (__a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(__a , __a ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895"""))
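# The regex above accepts Indian mobile numbers: an optional "+91" prefix (with
# an optional dash or space), an optional leading "0" or "91", then ten digits
# starting with 7, 8 or 9. A few illustrative cases:
PHONE_RE = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
assert PHONE_RE.match("+918827897895")
assert PHONE_RE.match("9876543210")
assert not PHONE_RE.match("+91 1234567890")  # first digit after the prefix must be 7/8/9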
| 91 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
_SCREAMING_SNAKE_CASE : int = parser.parse_args()
_SCREAMING_SNAKE_CASE : List[Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
_SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPImageProcessor()
_SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
_SCREAMING_SNAKE_CASE : int = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__A : int = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=None ) -> List[str]:
'''simple docstring'''
require_version(deps[pkg] , UpperCamelCase__ )
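# `require_version` accepts a specifier like "tqdm>=4.27" and raises if the
# installed distribution does not satisfy it. A hedged sketch of the underlying
# version comparison using `packaging` (illustrative names; the real helper
# does more, e.g. the `_core` variant attaches an install hint on failure):
from packaging import version

def satisfies(installed: str, minimum: str) -> bool:
    return version.parse(installed) >= version.parse(minimum)

assert satisfies("4.64.1", "4.27")
assert not satisfies("4.26", "4.27")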
| 273 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Any = "upernet"
def __init__( self , a__=None , a__=512 , a__=0.0_2 , a__=[1, 2, 3, 6] , a__=True , a__=0.4 , a__=384 , a__=256 , a__=1 , a__=False , a__=255 , **a__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**a__ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
snake_case_ = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a__ , a__ ):
snake_case_ = backbone_config.get("model_type" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(a__ )
snake_case_ = backbone_config
snake_case_ = hidden_size
snake_case_ = initializer_range
snake_case_ = pool_scales
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_in_channels
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = loss_ignore_index
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 85 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def __UpperCAmelCase ( A : int ) -> List[Any]:
if num <= 0:
UpperCAmelCase_ : int = F"{num}: Invalid input, please enter a positive integer."
raise ValueError(A )
UpperCAmelCase_ : int = [True] * (num + 1)
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Tuple = 2
UpperCAmelCase_ : List[Any] = int(math.sqrt(A ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(A )
# Set multiples of start be False
for i in range(start * start , num + 1 , A ):
if sieve[i] is True:
UpperCAmelCase_ : int = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(A )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
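# The sieve above only runs the crossing-out loop for starts up to sqrt(num),
# then sweeps up the entries still marked True; total work is O(n log log n).
# A compact self-contained version (illustrative name) with expected outputs:
def sieve_example(num: int) -> list:
    flags = [True] * (num + 1)
    primes = []
    for start in range(2, int(math.sqrt(num)) + 1):
        if flags[start]:
            primes.append(start)
            for multiple in range(start * start, num + 1, start):
                flags[multiple] = False
    primes.extend(j for j in range(int(math.sqrt(num)) + 1, num + 1) if flags[j])
    return primes

assert sieve_example(10) == [2, 3, 5, 7]
assert sieve_example(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]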
| 304 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
for char in word:
snake_case_ = ord(snake_case )
if not _is_chinese_char(snake_case ):
return 0
return 1
def UpperCamelCase_( snake_case : List[str] ):
'''simple docstring'''
snake_case_ = set()
for token in tokens:
snake_case_ = len(snake_case ) > 1 and is_chinese(snake_case )
if chinese_word:
word_set.add(snake_case )
snake_case_ = list(snake_case )
return word_list
def UpperCamelCase_( snake_case : List[str] , snake_case : set() ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
snake_case_ = max([len(snake_case ) for w in chinese_word_set] )
snake_case_ = bert_tokens
snake_case_ , snake_case_ = 0, len(snake_case )
while start < end:
snake_case_ = True
if is_chinese(bert_word[start] ):
snake_case_ = min(end - start , snake_case )
for i in range(snake_case , 1 , -1 ):
snake_case_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
snake_case_ = "##" + bert_word[j]
snake_case_ = start + i
snake_case_ = False
break
if single_word:
start += 1
return bert_word
def UpperCamelCase_( snake_case : List[str] , snake_case : LTP , snake_case : BertTokenizer ):
'''simple docstring'''
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["cws"] ).cws
snake_case_ = [get_chinese_word(snake_case ) for r in res]
ltp_res.extend(snake_case )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=snake_case , truncation=snake_case , max_length=5_1_2 )
bert_res.extend(res["input_ids"] )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for input_ids, chinese_word in zip(snake_case , snake_case ):
snake_case_ = []
for id in input_ids:
snake_case_ = bert_tokenizer._convert_id_to_token(snake_case )
input_tokens.append(snake_case )
snake_case_ = add_sub_symbol(snake_case , snake_case )
snake_case_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(snake_case ):
if token[:2] == "##":
snake_case_ = token[2:]
# save chinese tokens' pos
if len(snake_case ) == 1 and _is_chinese_char(ord(snake_case ) ):
ref_id.append(snake_case )
ref_ids.append(snake_case )
assert len(snake_case ) == len(snake_case )
return ref_ids
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
with open(args.file_name , "r" , encoding="utf-8" ) as f:
snake_case_ = f.readlines()
snake_case_ = [line.strip() for line in data if len(snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
snake_case_ = LTP(args.ltp ) # faster in GPU device
snake_case_ = BertTokenizer.from_pretrained(args.bert )
snake_case_ = prepare_ref(snake_case , snake_case , snake_case )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
snake_case_ = [json.dumps(snake_case ) + "\n" for ref in ref_ids]
f.writelines(snake_case )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
main(args)
| 85 | 0 |
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
a__: str = 0
a__: Tuple = number
while duplicate > 0:
a__ , a__: Any = divmod(_SCREAMING_SNAKE_CASE , 10 )
fact_sum += factorial(_SCREAMING_SNAKE_CASE )
return fact_sum == number
if __name__ == "__main__":
print('Program to check whether a number is a Krisnamurthy Number or not.')
lowercase__ = int(input('Enter number: ').strip())
print(
f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
)
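# 145 is a Krishnamurthy number: 1! + 4! + 5! = 1 + 24 + 120 = 145. A
# self-contained check with illustrative names, since the helpers above share
# one obfuscated name:
from math import factorial

def is_krishnamurthy(number: int) -> bool:
    return number == sum(factorial(int(digit)) for digit in str(number))

assert is_krishnamurthy(145)
assert is_krishnamurthy(40585)
assert not is_krishnamurthy(123)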
| 290 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def UpperCamelCase_( snake_case : Callable ):
'''simple docstring'''
@wraps(snake_case )
def _inner_fn(*args , **kwargs ):
warnings.warn(
(f'\'{snake_case.__name__}\' is experimental and might be subject to breaking changes in the future.') , UserWarning , )
return snake_case(*args , **kwargs )
return _inner_fn
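# A usage sketch of the decorator above: each call of the wrapped function
# emits a UserWarning and then forwards to the original implementation.
@UpperCamelCase_
def risky_divide(a: float, b: float) -> float:
    return a / b

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert risky_divide(6, 3) == 2.0
    assert "experimental" in str(caught[0].message)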
| 85 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase : int = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, """r""", encoding="""utf-8""") as f:
lowercase : List[Any] = json.load(f)
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self , lowercase) -> Dict:
'''simple docstring'''
return FSMTTokenizer.from_pretrained(a__)
def __lowercase ( self , lowercase) -> Tuple:
'''simple docstring'''
a__ : str = FSMTForConditionalGeneration.from_pretrained(a__).to(a__)
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
])
@slow
def __lowercase ( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__ : str = F'facebook/wmt19-{pair}'
a__ : Union[str, Any] = self.get_tokenizer(a__)
a__ : Union[str, Any] = self.get_model(a__)
a__ : List[str] = bleu_data[pair]['src']
a__ : List[str] = bleu_data[pair]['tgt']
a__ : Tuple = tokenizer(a__ , return_tensors='pt' , truncation=a__ , padding='longest').to(a__)
a__ : List[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
a__ : int = tokenizer.batch_decode(
a__ , skip_special_tokens=a__ , clean_up_tokenization_spaces=a__)
a__ : Union[str, Any] = calculate_bleu(a__ , a__)
print(a__)
self.assertGreaterEqual(scores['bleu'] , a__)
| 99 |
'''simple docstring'''
from __future__ import annotations
import requests
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(snake_case ).json()
def UpperCamelCase_( snake_case : int = 1_0 ):
'''simple docstring'''
snake_case_ = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
snake_case_ = requests.get(snake_case ).json()[:max_stories]
return [get_hackernews_story(snake_case ) for story_id in story_ids]
def UpperCamelCase_( snake_case : int = 1_0 ):
'''simple docstring'''
snake_case_ = hackernews_top_stories(snake_case )
return "\n".join("* [{title}]({url})".format(**snake_case ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 85 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 186 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _snake_case ( unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase_ : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
snake_case_ = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
snake_case_ = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
snake_case_ = text_generator("This is a test" , do_sample=a__ , num_return_sequences=2 , return_tensors=a__ )
self.assertEqual(
a__ , [
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
] , )
snake_case_ = text_generator.model.config.eos_token_id
snake_case_ = "<pad>"
snake_case_ = text_generator(
["This is a test", "This is a second test"] , do_sample=a__ , num_return_sequences=2 , batch_size=2 , return_tensors=a__ , )
self.assertEqual(
a__ , [
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
] , )
@require_tf
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
snake_case_ = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
snake_case_ = text_generator(["This is a test", "This is a second test"] , do_sample=a__ )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> str:
'''simple docstring'''
snake_case_ = TextGenerationPipeline(model=a__ , tokenizer=a__ )
return text_generator, ["This is a test", "Another test"]
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = "Hello I believe in"
snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
snake_case_ = text_generator(a__ )
self.assertEqual(
a__ , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
snake_case_ = text_generator(a__ , stop_sequence=" fe" )
self.assertEqual(a__ , [{"generated_text": "Hello I believe in fe"}] )
def lowerCAmelCase__ ( self , a__ , a__ ) -> Tuple:
'''simple docstring'''
snake_case_ = text_generator.model
snake_case_ = text_generator.tokenizer
snake_case_ = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
snake_case_ = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
snake_case_ = pipeline(task="text-generation" , model=a__ , tokenizer=a__ , return_full_text=a__ )
snake_case_ = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
snake_case_ = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
snake_case_ = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
snake_case_ = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_full_text=a__ , return_text=a__ )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_full_text=a__ , return_tensors=a__ )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_text=a__ , return_tensors=a__ )
# An empty prompt is slightly special:
# it requires a BOS token to exist.
# Pegasus is a special case, since it always appends EOS and therefore
# works even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
snake_case_ = text_generator("" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
snake_case_ = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and controlling long
# generation with only max_length requires fancy calculation, so these
# tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
snake_case_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
snake_case_ = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(a__ ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
import torch
# Classic `model_kwargs`
snake_case_ = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
import torch
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
import torch
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=a__ , top_p=0.5 )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = "Hello world"
snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
snake_case_ = logging.get_logger("transformers.generation.tf_utils" )
else:
snake_case_ = logging.get_logger("transformers.generation.utils" )
snake_case_ = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_length=10 , max_new_tokens=1 )
self.assertIn(a__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_new_tokens=1 )
self.assertNotIn(a__ , cl.out )
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_length=10 )
self.assertNotIn(a__ , cl.out )
| 85 | 0 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
a__ : List[Any] = "naver-clova-ix/donut-base"
class lowercase_ ( unittest.TestCase ):
def __a ( self ):
UpperCamelCase__ = DonutProcessor.from_pretrained(a__ )
def __a ( self ):
UpperCamelCase__ = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
UpperCamelCase__ = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
UpperCamelCase__ = self.processor.tokenajson(a__ )
self.assertDictEqual(a__ , a__ )
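# `token2json` inverts Donut's XML-like serialization: <s_key>value</s_key>
# becomes {"key": "value"}, nesting becomes nested dicts, and <sep/> separates
# list items. A tiny flat-field sketch of that mapping (illustrative name; the
# real processor also handles nesting and lists):
import re

def tiny_tokenajson(tokens: str) -> dict:
    return {key: value for key, value in re.findall(r"<s_(\w+)>(.*?)</s_\1>", tokens)}

assert tiny_tokenajson("<s_city>Atlanta</s_city><s_zip>30301</s_zip>") == {
    "city": "Atlanta",
    "zip": "30301",
}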
| 80 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case ( unittest.TestCase , lowercase_ ):
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = load_tool("text-classification" )
self.tool.setup()
snake_case_ = load_tool("text-classification" , remote=a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
| 85 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
UpperCamelCase_ = "base_with_context"
def lowerCamelCase_ ( _a : int , _a : int ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
UpperCAmelCase_ : Dict = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_a )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCAmelCase_ : str = weights[F'''layers_{lyr_num}''']
UpperCAmelCase_ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
UpperCAmelCase_ : List[Any] = ly_weight["""attention"""]
UpperCAmelCase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCAmelCase_ : Any = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCAmelCase_ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCAmelCase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
UpperCAmelCase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
UpperCAmelCase_ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
UpperCAmelCase_ : str = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
UpperCAmelCase_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder( weights , model ):
'''simple docstring'''
UpperCAmelCase_ : Any = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
UpperCAmelCase_ : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_a )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
        attention_weights = ly_weight["""attention"""]
UpperCAmelCase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCAmelCase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCAmelCase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCAmelCase_ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
UpperCAmelCase_ : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
UpperCAmelCase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
UpperCAmelCase_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
UpperCAmelCase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
UpperCAmelCase_ : str = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder( weights , model ):
'''simple docstring'''
UpperCAmelCase_ : Dict = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
UpperCAmelCase_ : Any = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
UpperCAmelCase_ : Dict = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_a )
UpperCAmelCase_ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
UpperCAmelCase_ : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
UpperCAmelCase_ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
        attention_weights = ly_weight["""self_attention"""]
UpperCAmelCase_ : Any = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCAmelCase_ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCAmelCase_ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
        attention_weights = ly_weight["""MultiHeadDotProductAttention_0"""]
UpperCAmelCase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCAmelCase_ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCAmelCase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCAmelCase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
UpperCAmelCase_ : int = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
UpperCAmelCase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
UpperCAmelCase_ : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
UpperCAmelCase_ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
UpperCAmelCase_ : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
UpperCAmelCase_ : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
UpperCAmelCase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
UpperCAmelCase_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main( args ):
    '''simple docstring'''
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        """from __gin__ import dynamic_registration""",
        """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
        """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
        """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
    ]
    gin_file = os.path.join(args.checkpoint_path , """..""" , """config.gin""" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" )
    notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    decoder = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F"{MODEL}/checkpoint_500000",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
    main(args)
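
    # Hedged invocation example (added; script name and paths are placeholders):
    # python convert_music_spectrogram_to_diffusers.py \
    #     --checkpoint_path /path/to/base_with_context/checkpoint_500000 \
    #     --output_path ./spectrogram-diffusion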
| 345 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
    model_type = "bridgetower_vision_model"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1e-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ) -> int:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("model_type" ) == "bridgetower":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig( PretrainedConfig ):
    model_type = "bridgetower_text_model"
    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> Optional[int]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("model_type" ) == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerConfig( PretrainedConfig ):
    model_type = "bridgetower"
    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1e-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ) -> int:
        '''simple docstring'''
        _ = kwargs.pop("text_config_dict" , None )
        _ = kwargs.pop("vision_config_dict" , None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
@classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ) -> List[Any]:
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ) -> List[Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
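

# Hedged usage sketch (added): build the composite config from sub-configs and
# round-trip it through `to_dict`.
if __name__ == "__main__":
    text_cfg = BridgeTowerTextConfig()
    vision_cfg = BridgeTowerVisionConfig()
    cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
    assert cfg.to_dict()["model_type"] == "bridgetower"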
| 85 | 0 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path( input: torch.Tensor , drop_prob: float = 0.0 , training: bool = False ) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_() # binarize
    output = input.div(keep_prob ) * random_tensor
    return output
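

# Hedged sanity check (added): with training=False the input is returned unchanged,
# and in training mode survivors are rescaled by 1 / keep_prob, so the output is
# unbiased in expectation.
if __name__ == "__main__":
    _x = torch.ones(4, 3, 8, 8)
    assert torch.equal(drop_path(_x, drop_prob=0.5, training=False), _x)
    print(drop_path(_x, drop_prob=0.5, training=True).mean())  # ~1.0 on average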
class PoolFormerDropPath( nn.Module ):
    '''simple docstring'''
    def __init__( self , drop_prob = None ) -> None:
        super().__init__()
        self.drop_prob = drop_prob
    def forward( self , hidden_states ) -> torch.Tensor:
        return drop_path(hidden_states , self.drop_prob , self.training )
    def extra_repr( self ) -> str:
        return "p={}".format(self.drop_prob )
class PoolFormerEmbeddings( nn.Module ):
    '''simple docstring'''
    def __init__( self , hidden_size , num_channels , patch_size , stride , padding , norm_layer=None ) -> Tuple:
        super().__init__()
        patch_size = patch_size if isinstance(patch_size , collections.abc.Iterable ) else (patch_size, patch_size)
        stride = stride if isinstance(stride , collections.abc.Iterable ) else (stride, stride)
        padding = padding if isinstance(padding , collections.abc.Iterable ) else (padding, padding)
        self.projection = nn.Conv2d(num_channels , hidden_size , kernel_size=patch_size , stride=stride , padding=padding )
        self.norm = norm_layer(hidden_size ) if norm_layer else nn.Identity()
    def forward( self , pixel_values ) -> str:
        embeddings = self.projection(pixel_values )
        embeddings = self.norm(embeddings )
        return embeddings
class PoolFormerGroupNorm( nn.GroupNorm ):
    '''simple docstring'''
    def __init__( self , num_channels , **kwargs ) -> List[str]:
        super().__init__(1 , num_channels , **kwargs )
class PoolFormerPooling( nn.Module ):
    '''simple docstring'''
    def __init__( self , pool_size ) -> List[Any]:
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size , stride=1 , padding=pool_size // 2 , count_include_pad=False )
    def forward( self , hidden_states ) -> Dict:
        return self.pool(hidden_states ) - hidden_states
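

# Hedged sanity check (added): the token mixer is average pooling minus identity,
# so a constant feature map mixes to (numerically) zero.
if __name__ == "__main__":
    _pool = PoolFormerPooling(3)
    _x = torch.ones(1, 4, 8, 8)
    assert torch.allclose(_pool(_x), torch.zeros_like(_x))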
class PoolFormerOutput( nn.Module ):
    '''simple docstring'''
    def __init__( self , config , dropout_prob , hidden_size , intermediate_size ) -> Optional[Any]:
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size , intermediate_size , 1 )
        self.conv2 = nn.Conv2d(intermediate_size , hidden_size , 1 )
        self.drop = PoolFormerDropPath(dropout_prob )
        if isinstance(config.hidden_act , str ):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act
    def forward( self , hidden_states ) -> Dict:
        hidden_states = self.conv1(hidden_states )
        hidden_states = self.act_fn(hidden_states )
        hidden_states = self.drop(hidden_states )
        hidden_states = self.conv2(hidden_states )
        hidden_states = self.drop(hidden_states )
        return hidden_states
class PoolFormerLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self , config , num_channels , pool_size , hidden_size , intermediate_size , drop_path ) -> Dict:
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size )
        self.output = PoolFormerOutput(config , drop_path , hidden_size , intermediate_size )
        self.before_norm = PoolFormerGroupNorm(num_channels )
        self.after_norm = PoolFormerGroupNorm(num_channels )
        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path ) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
    def forward( self , hidden_states ) -> List[Any]:
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states ) )
            scaled_op = self.layer_scale_1.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op )
            outputs = ()
            layer_output = self.output(self.after_norm(hidden_states ) )
            scaled_op = self.layer_scale_2.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op )
            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states ) ) )
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()
            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states ) ) )
            output = hidden_states + layer_output
            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder( nn.Module ):
    '''simple docstring'''
    def __init__( self , config ) -> Tuple:
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        self.patch_embeddings = nn.ModuleList(embeddings )
        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        config , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(layers ) )
        self.block = nn.ModuleList(blocks )
    def forward( self , pixel_values , output_hidden_states=False , return_dict=True ) -> Tuple:
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            embedding_layer , block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states )
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer ):
                layer_outputs = blk(hidden_states )
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states , hidden_states=all_hidden_states )
class PoolFormerPreTrainedModel( PreTrainedModel ):
    '''simple docstring'''
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights( self , module ) -> List[Any]:
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
    def _set_gradient_checkpointing( self , module , value=False ) -> Dict:
        if isinstance(module , PoolFormerEncoder ):
            module.gradient_checkpointing = value
lowercase_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , lowercase_ , )
class PoolFormerModel( PoolFormerPreTrainedModel ):
    '''simple docstring'''
    def __init__( self , config ) -> str:
        super().__init__(config )
        self.config = config
        self.encoder = PoolFormerEncoder(config )
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings( self ) -> Dict:
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("""You have to specify pixel_values""" )
        encoder_outputs = self.encoder(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output , hidden_states=encoder_outputs.hidden_states , )
class PoolFormerFinalPooler( nn.Module ):
    '''simple docstring'''
    def __init__( self , config ) -> int:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size , config.hidden_size )
    def forward( self , hidden_states ) -> List[str]:
        output = self.dense(hidden_states )
        return output
@add_start_docstrings(
    '''\n    PoolFormer Model transformer with an image classification head on top\n    ''' , POOLFORMER_START_DOCSTRING , )
class PoolFormerForImageClassification( PoolFormerPreTrainedModel ):
    '''simple docstring'''
    def __init__( self , config ) -> List[Any]:
        super().__init__(config )
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config )
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output ).mean([-2, -1] ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
| 58 |
'''simple docstring'''
from __future__ import annotations
def all_unique( snake_case : list[int] ) -> bool:
'''simple docstring'''
return len(set(snake_case ) ) == len(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
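    # Hedged quick checks (added): exercise the predicate directly, since the
    # docstring carries no doctests for `testmod` to pick up.
    assert all_unique([1, 2, 3, 4] ) is True
    assert all_unique([1, 2, 2, 4] ) is False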
| 85 | 0 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
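
# Hedged invocation example (added; script name and output path are placeholders):
# python convert_unclip_txt2img_to_image_variation.py \
#     --txt2img_unclip kakaobrain/karlo-v1-alpha --dump_path ./karlo-image-variation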
| 147 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_SCREAMING_SNAKE_CASE : Any = False
try:
_SCREAMING_SNAKE_CASE : Optional[Any] = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class _snake_case :
def __init__( self , a__ = None , a__ = [] ) -> List[str]:
'''simple docstring'''
snake_case_ = 0
snake_case_ = choices
snake_case_ = prompt
if sys.platform == "win32":
snake_case_ = "*"
else:
snake_case_ = "➔ "
def lowerCAmelCase__ ( self , a__ , a__ = "" ) -> int:
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , a__ )
else:
forceWrite(self.choices[index] , a__ )
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(a__ )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def lowerCAmelCase__ ( self , a__ , a__ = 1 ) -> List[str]:
'''simple docstring'''
snake_case_ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(a__ )
move_cursor(a__ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(a__ )] for number in range(10 )] )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = int(chr(self.current_selection ) )
snake_case_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , a__ )
else:
return
else:
return
def lowerCAmelCase__ ( self , a__ = 0 ) -> List[str]:
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
snake_case_ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(a__ )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
snake_case_ = int(builtins.input() )
except ValueError:
snake_case_ = default_choice
else:
snake_case_ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(a__ , "\n" )
return choice
| 85 | 0 |
import argparse
import os
import re
_UpperCAmelCase = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_UpperCAmelCase = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
_UpperCAmelCase = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def sort_auto_mapping ( fname : str ,overwrite : bool = False ):
    '''simple docstring'''
    with open(fname ,'r' ,encoding='utf-8' ) as f:
        content = f.read()
    lines = content.split('\n' )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r'^(\s*)\S' ,lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(' ' * indent + '(' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(' ' * indent + ')' ):
                        line_idx += 1
                    blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks ,key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname ,'w' ,encoding='utf-8' ) as f:
            f.write('\n'.join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings ( overwrite : bool = False ):
    '''simple docstring'''
    fnames = [os.path.join(TRANSFORMERS_PATH ,f ) for f in os.listdir(TRANSFORMERS_PATH ) if f.endswith('.py' )]
    diffs = [sort_auto_mapping(fname ,overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames ,diffs ) if d]
        raise ValueError(
            f'''The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix'''
            ' this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
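
# Hedged illustration (added): `_re_identifier` extracts the first quoted name in a
# mapping entry, which is the key `sort_auto_mapping` sorts on.
assert _re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0] == "albert"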
| 140 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory( _ ):
'''simple docstring'''
return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand ):
@staticmethod
    def register_subcommand( parser ) -> Optional[int]:
        '''simple docstring'''
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
    def run( self ) -> Tuple:
        '''simple docstring'''
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
        return info
@staticmethod
    def format_dict( d ) -> str:
        '''simple docstring'''
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 85 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
            json.dump(image_processor_map , fp)
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs)
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs)
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False)
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor , CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor , CLIPImageProcessor)
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , CLIPImageProcessor)
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='''np''')
        input_processor = processor(images=image_input , return_tensors='''np''')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''pixel_values'''])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok , decoded_processor)
    def test_model_input_names( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 91 |
'''simple docstring'''
import os
_SCREAMING_SNAKE_CASE : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals( numerals : str ):
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals( num : int ):
    '''simple docstring'''
    numerals = ""
    m_count = num // 1_0_0_0
numerals += m_count * "M"
num %= 1_0_0_0
    c_count = num // 1_0_0
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
    x_count = num // 1_0
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
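

# Hedged round-trip checks (added): a canonical (minimal) numeral should survive
# parse -> generate unchanged, and additive strings should still parse correctly.
assert parse_roman_numerals("MCMXC") == 1990
assert generate_roman_numerals(1990) == "MCMXC"
assert parse_roman_numerals("IIIIIIIII") == 9
assert generate_roman_numerals(9) == "IX"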
def UpperCamelCase_( snake_case : str = "/p089_roman.txt" ):
'''simple docstring'''
snake_case_ = 0
with open(os.path.dirname(snake_case ) + roman_numerals_filename ) as filea:
snake_case_ = filea.readlines()
for line in lines:
snake_case_ = line.strip()
snake_case_ = parse_roman_numerals(snake_case )
snake_case_ = generate_roman_numerals(snake_case )
savings += len(snake_case ) - len(snake_case )
return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 85 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFDebertaVaModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFDebertaVaForMaskedLM(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFDebertaVaForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFDebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=3_7 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        self.assertIsNotNone(model )
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        pass
@slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        input_ids = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_slice = tf.constant(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 )
| 273 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ["CLIPFeatureExtractor"]
_SCREAMING_SNAKE_CASE : Dict = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( A : Tuple , A : List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = (boundary[1] - boundary[0]) / steps
UpperCAmelCase_ : Tuple = boundary[0]
UpperCAmelCase_ : Any = boundary[1]
UpperCAmelCase_ : List[str] = make_points(A , A , A )
UpperCAmelCase_ : List[Any] = 0.0
y += (h / 2.0) * f(A )
for i in x_i:
# print(i)
y += h * f(A )
y += (h / 2.0) * f(A )
return y
def __UpperCAmelCase ( A : Optional[Any] , A : Dict , A : Optional[Any] ) -> str:
UpperCAmelCase_ : str = a + h
while x < (b - h):
yield x
UpperCAmelCase_ : Union[str, Any] = x + h
def __UpperCAmelCase ( A : Tuple ) -> Dict: # enter your function here
UpperCAmelCase_ : Tuple = (x - 0) * (x - 0)
return y
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = 0.0 # Lower bound of integration
UpperCAmelCase_ : Tuple = 1.0 # Upper bound of integration
UpperCAmelCase_ : Dict = 10.0 # define number of steps or resolution
UpperCAmelCase_ : str = [a, b] # define boundary of integration
UpperCAmelCase_ : Tuple = method_a(A , A )
print(F"y = {y}" )
if __name__ == "__main__":
main()
| 304 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
_SCREAMING_SNAKE_CASE : int = {
"gpt-neox-20b": 2048,
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : str = VOCAB_FILES_NAMES
lowerCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : str = ["input_ids", "attention_mask"]
def __init__( self , a__=None , a__=None , a__=None , a__="<|endoftext|>" , a__="<|endoftext|>" , a__="<|endoftext|>" , a__=False , **a__ , ) -> Tuple:
'''simple docstring'''
super().__init__(
a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , add_prefix_space=a__ , **a__ , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space:
snake_case_ = getattr(a__ , pre_tok_state.pop("type" ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**a__ )
snake_case_ = add_prefix_space
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
def lowerCAmelCase__ ( self , a__ ) -> List[int]:
'''simple docstring'''
snake_case_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] )
if len(a__ ) > self.model_max_length:
snake_case_ = input_ids[-self.model_max_length :]
return input_ids
| 85 | 0 |
"""simple docstring"""
from __future__ import annotations
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
a__: Optional[Any] = 0.00
a__: Any = 0
for resistor in resistors:
if resistor <= 0:
a__: int = F'Resistor at index {index} has a negative or zero value!'
raise ValueError(_SCREAMING_SNAKE_CASE )
first_sum += 1 / float(_SCREAMING_SNAKE_CASE )
index += 1
return 1 / first_sum
def __a ( _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
a__: str = 0.00
a__: List[Any] = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
a__: Optional[int] = F'Resistor at index {index} has a negative value!'
raise ValueError(_SCREAMING_SNAKE_CASE )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_( snake_case : Tuple ):
'''simple docstring'''
snake_case_ = FileLock(str(tmpdir / "foo.lock" ) )
snake_case_ = FileLock(str(tmpdir / "foo.lock" ) )
snake_case_ = 0.01
with locka.acquire():
with pytest.raises(snake_case ):
snake_case_ = time.time()
locka.acquire(snake_case )
assert time.time() - _start > timeout
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = "a" * 1_0_0_0 + ".lock"
snake_case_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(snake_case )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
snake_case_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(snake_case ):
locka.acquire(0 )
| 85 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , lowercase=1 / 255 , lowercase=True , ) -> Tuple:
'''simple docstring'''
a__ : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
a__ : Optional[int] = parent
a__ : Dict = batch_size
a__ : Any = num_channels
a__ : Union[str, Any] = min_resolution
a__ : List[str] = max_resolution
a__ : Tuple = do_resize
a__ : List[str] = size
a__ : int = do_normalize
a__ : str = image_mean
a__ : Tuple = image_std
a__ : Tuple = do_rescale
a__ : List[str] = rescale_factor
a__ : Union[str, Any] = do_pad
def __lowercase ( self) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __lowercase ( self , lowercase , lowercase=False) -> Optional[int]:
'''simple docstring'''
if not batched:
a__ : Union[str, Any] = image_inputs[0]
if isinstance(a__ , Image.Image):
a__ , a__ : Any = image.size
else:
a__ , a__ : Optional[Any] = image.shape[1], image.shape[2]
if w < h:
a__ : str = int(self.size['shortest_edge'] * h / w)
a__ : Any = self.size['shortest_edge']
elif w > h:
a__ : Any = self.size['shortest_edge']
a__ : Union[str, Any] = int(self.size['shortest_edge'] * w / h)
else:
a__ : List[Any] = self.size['shortest_edge']
a__ : Optional[Any] = self.size['shortest_edge']
else:
a__ : Optional[Any] = []
for image in image_inputs:
a__ , a__ : Dict = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
a__ : Tuple = max(a__ , key=lambda lowercase: item[0])[0]
a__ : Optional[int] = max(a__ , key=lambda lowercase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class A__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
__A : Any = ConditionalDetrImageProcessor if is_vision_available() else None
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Optional[Any] = ConditionalDetrImageProcessingTester(self)
@property
def __lowercase ( self) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(a__ , 'image_mean'))
self.assertTrue(hasattr(a__ , 'image_std'))
self.assertTrue(hasattr(a__ , 'do_normalize'))
self.assertTrue(hasattr(a__ , 'do_resize'))
self.assertTrue(hasattr(a__ , 'size'))
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333})
self.assertEqual(image_processor.do_pad , a__)
a__ : str = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=a__)
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84})
self.assertEqual(image_processor.do_pad , a__)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
pass
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__)
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image)
# Test not batched input
a__ : Tuple = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__ , a__ : int = self.image_processor_tester.get_expected_values(a__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ , a__ : Any = self.image_processor_tester.get_expected_values(a__ , batched=a__)
a__ : Union[str, Any] = image_processing(a__ , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : str = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__)
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray)
# Test not batched input
a__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__ , a__ : Tuple = self.image_processor_tester.get_expected_values(a__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ : Union[str, Any] = image_processing(a__ , return_tensors='pt').pixel_values
a__ , a__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a__ , batched=a__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : int = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__)
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor)
# Test not batched input
a__ : List[str] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
a__ , a__ : Optional[Any] = self.image_processor_tester.get_expected_values(a__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ : Optional[int] = image_processing(a__ , return_tensors='pt').pixel_values
a__ , a__ : int = self.image_processor_tester.get_expected_values(a__ , batched=a__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
a__ : List[Any] = json.loads(f.read())
a__ : List[Any] = {'image_id': 3_9769, 'annotations': target}
# encode them
a__ : Union[str, Any] = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50')
a__ : str = image_processing(images=a__ , annotations=a__ , return_tensors='pt')
# verify pixel values
a__ : List[str] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , a__)
a__ : int = torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a__ , atol=1e-4))
# verify area
a__ : List[Any] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a__))
# verify boxes
a__ : str = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , a__)
a__ : List[str] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a__ , atol=1e-3))
# verify image_id
a__ : Any = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a__))
# verify is_crowd
a__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a__))
# verify class_labels
a__ : int = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a__))
# verify orig_size
a__ : Any = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a__))
# verify size
a__ : Dict = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a__))
@slow
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
a__ : Union[str, Any] = json.loads(f.read())
a__ : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
a__ : str = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
a__ : List[str] = ConditionalDetrImageProcessor(format='coco_panoptic')
a__ : str = image_processing(images=a__ , annotations=a__ , masks_path=a__ , return_tensors='pt')
# verify pixel values
a__ : Optional[int] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , a__)
a__ : List[str] = torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a__ , atol=1e-4))
# verify area
a__ : List[Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a__))
# verify boxes
a__ : int = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , a__)
a__ : Optional[int] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a__ , atol=1e-3))
# verify image_id
a__ : Union[str, Any] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a__))
# verify is_crowd
a__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a__))
# verify class_labels
a__ : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a__))
# verify masks
a__ : Optional[int] = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , a__)
# verify orig_size
a__ : Any = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a__))
# verify size
a__ : Optional[int] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a__))
| 99 |
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_SCREAMING_SNAKE_CASE : Any = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _snake_case ( datasets.BuilderConfig ):
lowerCAmelCase_ : Optional[datasets.Features] = None
def UpperCamelCase_( snake_case : "pyspark.sql.DataFrame" , snake_case : List[int] , ):
'''simple docstring'''
import pyspark
def generate_fn():
snake_case_ = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
snake_case_ = df_with_partition_id.select("*" ).where(f'part_id = {partition_id}' ).drop("part_id" )
snake_case_ = partition_df.collect()
snake_case_ = 0
for row in rows:
yield f'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class _snake_case ( _BaseExamplesIterable ):
def __init__( self , a__ , a__=None , ) -> Any:
'''simple docstring'''
snake_case_ = df
snake_case_ = partition_order or range(self.df.rdd.getNumPartitions() )
snake_case_ = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ) -> Union[str, Any]:
'''simple docstring'''
yield from self.generate_examples_fn()
def lowerCAmelCase__ ( self , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
def lowerCAmelCase__ ( self , a__ , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = self.split_shard_indices_by_worker(a__ , a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
@property
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.partition_order )
class _snake_case ( datasets.DatasetBuilder ):
lowerCAmelCase_ : Dict = SparkConfig
def __init__( self , a__ , a__ = None , a__ = None , **a__ , ) -> str:
'''simple docstring'''
import pyspark
snake_case_ = pyspark.sql.SparkSession.builder.getOrCreate()
snake_case_ = df
snake_case_ = working_dir
super().__init__(
cache_dir=a__ , config_name=str(self.df.semanticHash() ) , **a__ , )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
def create_cache_and_write_probe(a__ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a__ )
snake_case_ = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a__ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
snake_case_ = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self , a__ ) -> Optional[Any]:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCAmelCase__ ( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
import pyspark
def get_arrow_batch_size(a__ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
snake_case_ = self.df.count()
snake_case_ = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
snake_case_ = (
self.df.limit(a__ )
.repartition(1 )
.mapInArrow(a__ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
snake_case_ = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
snake_case_ = min(a__ , int(approx_total_size / max_shard_size ) )
snake_case_ = self.df.repartition(a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
'''simple docstring'''
import pyspark
snake_case_ = ParquetWriter if file_format == "parquet" else ArrowWriter
snake_case_ = os.path.join(self._working_dir , os.path.basename(a__ ) ) if self._working_dir else fpath
snake_case_ = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
snake_case_ = self.config.features
snake_case_ = self._writer_batch_size
snake_case_ = self._fs.storage_options
def write_arrow(a__ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
snake_case_ = pyspark.TaskContext().taskAttemptId()
snake_case_ = next(a__ , a__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
snake_case_ = 0
snake_case_ = writer_class(
features=a__ , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([first_batch] )
writer.write_table(a__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
snake_case_ = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([batch] )
writer.write_table(a__ )
if writer._num_bytes > 0:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a__ ) ):
snake_case_ = os.path.join(os.path.dirname(a__ ) , os.path.basename(a__ ) )
shutil.move(a__ , a__ )
snake_case_ = (
self.df.mapInArrow(a__ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCAmelCase__ ( self , a__ , a__ = "arrow" , a__ = None , a__ = None , **a__ , ) -> int:
'''simple docstring'''
self._validate_cache_dir()
snake_case_ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a__ )
snake_case_ = not is_remote_filesystem(self._fs )
snake_case_ = os.path.join if is_local else posixpath.join
snake_case_ = "-TTTTT-SSSSS-of-NNNNN"
snake_case_ = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
snake_case_ = path_join(self._output_dir , a__ )
snake_case_ = 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = []
snake_case_ = []
for task_id, content in self._prepare_split_single(a__ , a__ , a__ ):
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a__ )
snake_case_ = total_num_examples
snake_case_ = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
snake_case_ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
snake_case_ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a__ , a__ , a__ , ):
rename(
a__ , fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace("TTTTT-SSSSS" , F'{global_shard_id:05d}' ).replace("NNNNN" , F'{total_shards:05d}' ) , )
snake_case_ = []
snake_case_ = 0
for i in range(len(a__ ) ):
snake_case_ , snake_case_ = task_id_and_num_shards[i]
for shard_id in range(a__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a__ , len(a__ ) ).map(lambda a__ : _rename_shard(*a__ ) ).collect()
else:
# don't use any pattern
snake_case_ = 0
snake_case_ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace(a__ , "" ) , )
def lowerCAmelCase__ ( self , a__ , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 85 | 0 |
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
A_ : int = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('''All input parameters must be positive''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
A_ : Any = 1 - (matter_density + radiation_density + dark_energy)
A_ : str = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
A_ : Optional[int] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
UpperCamelCase = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 186 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_SCREAMING_SNAKE_CASE : List[str] = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
'''simple docstring'''
def _UpperCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def _UpperCamelCase ( __A ) -> str:
'''simple docstring'''
UpperCamelCase__ = 1
UpperCamelCase__ = 2
while i * i <= n:
UpperCamelCase__ = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def _UpperCamelCase ( ) -> int:
'''simple docstring'''
return next(i for i in triangle_number_generator() if count_divisors(__A ) > 500 )
if __name__ == "__main__":
print(solution())
| 80 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return 12
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(a__ )
@property
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = 12
snake_case_ = 12
snake_case_ = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
snake_case_ = TransformeraDModel(**a__ )
return model
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = "cpu"
snake_case_ = self.dummy_vqvae
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_transformer
snake_case_ = VQDiffusionScheduler(self.num_embed )
snake_case_ = LearnedClassifierFreeSamplingEmbeddings(learnable=a__ )
snake_case_ = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = "teddy bear playing in the pool"
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" )
snake_case_ = output.images
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe(
[prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = "cpu"
snake_case_ = self.dummy_vqvae
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_transformer
snake_case_ = VQDiffusionScheduler(self.num_embed )
snake_case_ = LearnedClassifierFreeSamplingEmbeddings(
learnable=a__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = "teddy bear playing in the pool"
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" )
snake_case_ = output.images
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe(
[prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
snake_case_ = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
snake_case_ = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=a__ , output_type="np" , )
snake_case_ = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 85 | 0 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Tuple ) -> None:
UpperCAmelCase_ : str = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) ,1 )
self.assertEqual(x.component(2 ) ,3 )
UpperCAmelCase_ : Optional[int] = Vector()
def A__ ( self: int ) -> None:
UpperCAmelCase_ : List[Any] = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(a__ ) ,"""(0,0,0,0,0,1)""" )
def A__ ( self: Union[str, Any] ) -> None:
UpperCAmelCase_ : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(a__ ) ,4 )
def A__ ( self: Optional[Any] ) -> None:
UpperCAmelCase_ : str = Vector([1, 2] )
UpperCAmelCase_ : List[Any] = Vector([1, 2, 3, 4, 5] )
UpperCAmelCase_ : int = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
UpperCAmelCase_ : int = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() ,2.2_3_6 ,3 )
self.assertAlmostEqual(y.euclidean_length() ,7.4_1_6 ,3 )
self.assertEqual(z.euclidean_length() ,0 )
self.assertAlmostEqual(w.euclidean_length() ,7.6_1_6 ,3 )
def A__ ( self: Optional[int] ) -> None:
UpperCAmelCase_ : Union[str, Any] = Vector([1, 2, 3] )
UpperCAmelCase_ : Dict = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) ,2 )
self.assertEqual((x + y).component(1 ) ,3 )
self.assertEqual((x + y).component(2 ) ,4 )
def A__ ( self: Any ) -> None:
UpperCAmelCase_ : Any = Vector([1, 2, 3] )
UpperCAmelCase_ : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) ,0 )
self.assertEqual((x - y).component(1 ) ,1 )
self.assertEqual((x - y).component(2 ) ,2 )
def A__ ( self: Optional[int] ) -> None:
UpperCAmelCase_ : str = Vector([1, 2, 3] )
UpperCAmelCase_ : Union[str, Any] = Vector([2, -1, 4] ) # for test of dot product
UpperCAmelCase_ : List[Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) ,"""(3.0,6.0,9.0)""" )
self.assertEqual((a * b) ,0 )
def A__ ( self: Any ) -> None:
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) ,10 )
def A__ ( self: Optional[Any] ) -> None:
self.assertEqual(str(unit_basis_vector(3 ,1 ) ) ,"""(0,1,0)""" )
def A__ ( self: int ) -> None:
UpperCAmelCase_ : int = Vector([1, 2, 3] )
UpperCAmelCase_ : Any = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 ,a__ ,a__ ) ) ,"""(3,4,7)""" )
def A__ ( self: Optional[Any] ) -> None:
UpperCAmelCase_ : Tuple = Vector([1, 0, 0, 0, 0, 0] )
UpperCAmelCase_ : Optional[int] = x.copy()
self.assertEqual(str(a__ ) ,str(a__ ) )
def A__ ( self: Optional[Any] ) -> None:
UpperCAmelCase_ : int = Vector([1, 0, 0] )
x.change_component(0 ,0 )
x.change_component(1 ,1 )
self.assertEqual(str(a__ ) ,"""(0,1,0)""" )
def A__ ( self: Tuple ) -> None:
UpperCAmelCase_ : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" ,str(a__ ) )
def A__ ( self: List[str] ) -> None:
UpperCAmelCase_ : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
UpperCAmelCase_ : Union[str, Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] ,a.minor(a__ ,a__ ) )
def A__ ( self: Dict ) -> None:
UpperCAmelCase_ : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
UpperCAmelCase_ : List[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] ,a.cofactor(a__ ,a__ ) )
def A__ ( self: Dict ) -> None:
UpperCAmelCase_ : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertEqual(-5 ,a.determinant() )
def A__ ( self: Optional[Any] ) -> None:
UpperCAmelCase_ : List[str] = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ,3 ,3 )
UpperCAmelCase_ : List[Any] = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" ,str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" ,str(a * 2 ) )
def A__ ( self: Dict ) -> None:
UpperCAmelCase_ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
a.change_component(0 ,2 ,5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" ,str(a__ ) )
def A__ ( self: Tuple ) -> None:
UpperCAmelCase_ : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertEqual(7 ,a.component(2 ,1 ) ,0.0_1 )
def A__ ( self: int ) -> None:
UpperCAmelCase_ : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
UpperCAmelCase_ : str = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] ,3 ,3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" ,str(a + b ) )
def A__ ( self: Tuple ) -> None:
UpperCAmelCase_ : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
UpperCAmelCase_ : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] ,3 ,3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" ,str(a - b ) )
def A__ ( self: str ) -> None:
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" ,str(square_zero_matrix(5 ) ) ,)
if __name__ == "__main__":
unittest.main()
| 345 |
'''simple docstring'''
from statistics import mean, stdev
def UpperCamelCase_( snake_case : list , snake_case : int = 3 ):
'''simple docstring'''
snake_case_ = min(snake_case )
snake_case_ = max(snake_case )
# normalize data
return [round((x - x_min) / (x_max - x_min) , snake_case ) for x in data]
def UpperCamelCase_( snake_case : list , snake_case : int = 3 ):
'''simple docstring'''
snake_case_ = mean(snake_case )
snake_case_ = stdev(snake_case )
# standardize data
return [round((x - mu) / (sigma) , snake_case ) for x in data]
| 85 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 58 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 85 | 0 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
a : Optional[Any] = "."
if __name__ == "__main__":
a : Dict = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
a : str = []
a : Optional[Any] = []
with open(doctest_file_path) as fp:
for line in fp:
a : Optional[int] = line.strip()
a : Union[str, Any] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
a : List[str] = "\n".join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 147 |
'''simple docstring'''
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[int] ):
'''simple docstring'''
snake_case_ = [0 for i in range(r + 1 )]
# nc0 = 1
snake_case_ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
snake_case_ = min(snake_case , snake_case )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 85 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_UpperCAmelCase = False
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 1_2
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 1_2
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 3_2
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Dict = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(a__ )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Union[str, Any] = 1_2
A_ : Union[str, Any] = 1_2
A_ : List[str] = {
'attention_bias': True,
'cross_attention_dim': 3_2,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 3_2,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
A_ : Tuple = TransformeraDModel(**a__ )
return model
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = 'cpu'
A_ : Union[str, Any] = self.dummy_vqvae
A_ : List[Any] = self.dummy_text_encoder
A_ : List[Any] = self.dummy_tokenizer
A_ : Dict = self.dummy_transformer
A_ : Union[str, Any] = VQDiffusionScheduler(self.num_embed )
A_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=a__ )
A_ : str = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
A_ : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A_ : Tuple = 'teddy bear playing in the pool'
A_ : Tuple = torch.Generator(device=a__ ).manual_seed(0 )
A_ : Tuple = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type='np' )
A_ : Any = output.images
A_ : Dict = torch.Generator(device=a__ ).manual_seed(0 )
A_ : int = pipe(
[prompt] , generator=a__ , output_type='np' , return_dict=a__ , num_inference_steps=2 )[0]
A_ : Any = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
A_ : Tuple = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = 'cpu'
A_ : Tuple = self.dummy_vqvae
A_ : Union[str, Any] = self.dummy_text_encoder
A_ : Union[str, Any] = self.dummy_tokenizer
A_ : List[str] = self.dummy_transformer
A_ : Optional[Any] = VQDiffusionScheduler(self.num_embed )
A_ : int = LearnedClassifierFreeSamplingEmbeddings(
learnable=a__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
A_ : List[str] = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
A_ : Optional[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A_ : Optional[Any] = 'teddy bear playing in the pool'
A_ : Any = torch.Generator(device=a__ ).manual_seed(0 )
A_ : Optional[int] = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type='np' )
A_ : Optional[Any] = output.images
A_ : Optional[Any] = torch.Generator(device=a__ ).manual_seed(0 )
A_ : Dict = pipe(
[prompt] , generator=a__ , output_type='np' , return_dict=a__ , num_inference_steps=2 )[0]
A_ : List[Any] = image[0, -3:, -3:, -1]
A_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
A_ : str = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
A_ : int = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
A_ : List[Any] = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
A_ : str = torch.Generator(device=a__ ).manual_seed(0 )
A_ : List[Any] = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=a__ , output_type='np' , )
A_ : Dict = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 140 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Tuple = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
_SCREAMING_SNAKE_CASE : List[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def _A (__a ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
SCREAMING_SNAKE_CASE_ : List[str] = f'Input value of [number={number}] must be an integer'
raise TypeError(__a )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(__a ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
_SCREAMING_SNAKE_CASE : int = parser.parse_args()
_SCREAMING_SNAKE_CASE : List[Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
_SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPImageProcessor()
_SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
_SCREAMING_SNAKE_CASE : int = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 85 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
__A : Union[str, Any] = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
__A : List[str] = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
__A : Union[str, Any] = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
__A : Any = sorted(arg_to_scheduler.keys())
__A : List[str] = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class A_ (pl.LightningModule ):
def __init__( self , _A , _A=None , _A="base" , _A=None , _A=None , _A=None , **_A , ):
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(a__ )
UpperCAmelCase = 0
UpperCAmelCase = Path(self.hparams.output_dir )
UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
UpperCAmelCase = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=a__ , **a__ , )
else:
UpperCAmelCase = config
UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , a__ , a__ ):
assert hasattr(self.config , a__ ), F"""model config doesn\'t have a `{p}` attribute"""
setattr(self.config , a__ , getattr(self.hparams , a__ ) )
if tokenizer is None:
UpperCAmelCase = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=a__ , )
else:
UpperCAmelCase = tokenizer
UpperCAmelCase = MODEL_MODES[mode]
if model is None:
UpperCAmelCase = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=a__ , )
else:
UpperCAmelCase = model
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
UpperCAmelCase = self.model_type.from_pretrained(*a__ , **a__ )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler]
UpperCAmelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model
UpperCAmelCase = ['''bias''', '''LayerNorm.weight''']
UpperCAmelCase = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
UpperCAmelCase = Adafactor(
a__ , lr=self.hparams.learning_rate , scale_parameter=a__ , relative_step=a__ )
else:
UpperCAmelCase = AdamW(
a__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
UpperCAmelCase = optimizer
UpperCAmelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def _lowercase ( self , _A , _A ):
'''simple docstring'''
return self.validation_step(a__ , a__ )
def _lowercase ( self , _A ):
'''simple docstring'''
return self.validation_end(a__ )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def _lowercase ( self , _A ):
'''simple docstring'''
if stage == "test":
UpperCAmelCase = len(self.test_dataloader().dataset )
else:
UpperCAmelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=a__ )
UpperCAmelCase = len(self.train_dataloader().dataset )
def _lowercase ( self , _A , _A , _A = False ):
'''simple docstring'''
raise NotImplementedError('''You must implement this for your task''' )
def _lowercase ( self ):
'''simple docstring'''
return self.train_loader
def _lowercase ( self ):
'''simple docstring'''
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=a__ )
def _lowercase ( self ):
'''simple docstring'''
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=a__ )
def _lowercase ( self , _A ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
a__ , list(filter(a__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = self.output_dir.joinpath('''best_tfmr''' )
UpperCAmelCase = self.step_count
self.model.save_pretrained(a__ )
self.tokenizer.save_pretrained(a__ )
@staticmethod
def _lowercase ( _A , _A ):
'''simple docstring'''
parser.add_argument(
'''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=a__ , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(a__ ).parent / '''test_run''' / '''cache''' ) , type=a__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=a__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=a__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=a__ , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=a__ , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5E-5 , type=a__ , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=a__ , metavar=a__ , type=a__ , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=a__ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=a__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=a__ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=a__ , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=a__ )
parser.add_argument('''--train_batch_size''' , default=3_2 , type=a__ )
parser.add_argument('''--eval_batch_size''' , default=3_2 , type=a__ )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class A_ (pl.Callback ):
def _lowercase ( self , _A , _A ):
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class A_ (pl.Callback ):
def _lowercase ( self , _A , _A ):
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(a__ )
class A_ (pl.Callback ):
def _lowercase ( self , _A , _A ):
'''simple docstring'''
UpperCAmelCase = trainer.lr_schedulers[0]['''scheduler''']
UpperCAmelCase = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(a__ )
def _lowercase ( self , _A , _A ):
'''simple docstring'''
rank_zero_info('''***** Validation results *****''' )
UpperCAmelCase = trainer.callback_metrics
# Log results
for key in sorted(a__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(a__ , str(metrics[key] ) ) )
def _lowercase ( self , _A , _A ):
'''simple docstring'''
rank_zero_info('''***** Test results *****''' )
UpperCAmelCase = trainer.callback_metrics
# Log and save results to file
UpperCAmelCase = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(a__ , '''w''' ) as writer:
for key in sorted(a__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(a__ , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(a__ , str(metrics[key] ) ) )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
parser.add_argument(
'''--output_dir''' , default=str(Path(UpperCamelCase__ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=UpperCamelCase__ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=UpperCamelCase__ , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=UpperCamelCase__ )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=UpperCamelCase__ , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=UpperCamelCase__ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=UpperCamelCase__ , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(UpperCamelCase__ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=UpperCamelCase__ , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=[] , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
pl.seed_everything(args.seed )
# init model
UpperCAmelCase = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=UpperCamelCase__ )
# add custom checkpoints
if checkpoint_callback is None:
UpperCAmelCase = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(UpperCamelCase__ )
if logging_callback is None:
UpperCAmelCase = LoggingCallback()
UpperCAmelCase = {}
if args.fpaa:
UpperCAmelCase = 16
if args.gpus > 1:
UpperCAmelCase = '''auto'''
UpperCAmelCase = '''ddp'''
UpperCAmelCase = args.accumulate_grad_batches
UpperCAmelCase = None
UpperCAmelCase = '''auto'''
UpperCAmelCase = pl.Trainer.from_argparse_args(
UpperCamelCase__ , weights_summary=UpperCamelCase__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=UpperCamelCase__ , val_check_interval=1 , num_sanity_val_steps=2 , **UpperCamelCase__ , )
if args.do_train:
trainer.fit(UpperCamelCase__ )
else:
print('''RAG modeling tests with new set functions successfuly executed!''' )
return trainer
| 273 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Any = "upernet"
def __init__( self , a__=None , a__=512 , a__=0.0_2 , a__=[1, 2, 3, 6] , a__=True , a__=0.4 , a__=384 , a__=256 , a__=1 , a__=False , a__=255 , **a__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**a__ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
snake_case_ = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a__ , a__ ):
snake_case_ = backbone_config.get("model_type" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(a__ )
snake_case_ = backbone_config
snake_case_ = hidden_size
snake_case_ = initializer_range
snake_case_ = pool_scales
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_in_channels
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = loss_ignore_index
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 85 | 0 |
'''simple docstring'''
from statistics import mean, stdev
def __UpperCAmelCase ( A : list , A : int = 3 ) -> Dict:
UpperCAmelCase_ : Any = min(A )
UpperCAmelCase_ : str = max(A )
# normalize data
return [round((x - x_min) / (x_max - x_min) , A ) for x in data]
def __UpperCAmelCase ( A : list , A : int = 3 ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = mean(A )
UpperCAmelCase_ : List[str] = stdev(A )
# standardize data
return [round((x - mu) / (sigma) , A ) for x in data]
| 304 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
for char in word:
snake_case_ = ord(snake_case )
if not _is_chinese_char(snake_case ):
return 0
return 1
def UpperCamelCase_( snake_case : List[str] ):
'''simple docstring'''
snake_case_ = set()
for token in tokens:
snake_case_ = len(snake_case ) > 1 and is_chinese(snake_case )
if chinese_word:
word_set.add(snake_case )
snake_case_ = list(snake_case )
return word_list
def UpperCamelCase_( snake_case : List[str] , snake_case : set() ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
snake_case_ = max([len(snake_case ) for w in chinese_word_set] )
snake_case_ = bert_tokens
snake_case_ , snake_case_ = 0, len(snake_case )
while start < end:
snake_case_ = True
if is_chinese(bert_word[start] ):
snake_case_ = min(end - start , snake_case )
for i in range(snake_case , 1 , -1 ):
snake_case_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
snake_case_ = "##" + bert_word[j]
snake_case_ = start + i
snake_case_ = False
break
if single_word:
start += 1
return bert_word
def UpperCamelCase_( snake_case : List[str] , snake_case : LTP , snake_case : BertTokenizer ):
'''simple docstring'''
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["cws"] ).cws
snake_case_ = [get_chinese_word(snake_case ) for r in res]
ltp_res.extend(snake_case )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=snake_case , truncation=snake_case , max_length=5_1_2 )
bert_res.extend(res["input_ids"] )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for input_ids, chinese_word in zip(snake_case , snake_case ):
snake_case_ = []
for id in input_ids:
snake_case_ = bert_tokenizer._convert_id_to_token(snake_case )
input_tokens.append(snake_case )
snake_case_ = add_sub_symbol(snake_case , snake_case )
snake_case_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(snake_case ):
if token[:2] == "##":
snake_case_ = token[2:]
# save chinese tokens' pos
if len(snake_case ) == 1 and _is_chinese_char(ord(snake_case ) ):
ref_id.append(snake_case )
ref_ids.append(snake_case )
assert len(snake_case ) == len(snake_case )
return ref_ids
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
with open(args.file_name , "r" , encoding="utf-8" ) as f:
snake_case_ = f.readlines()
snake_case_ = [line.strip() for line in data if len(snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
snake_case_ = LTP(args.ltp ) # faster in GPU device
snake_case_ = BertTokenizer.from_pretrained(args.bert )
snake_case_ = prepare_ref(snake_case , snake_case , snake_case )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
snake_case_ = [json.dumps(snake_case ) + "\n" for ref in ref_ids]
f.writelines(snake_case )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
main(args)
| 85 | 0 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
lowercase__ = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
lowercase__ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
lowercase__ = BeautifulSoup(res.text, 'html.parser')
lowercase__ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 290 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def UpperCamelCase_( snake_case : Callable ):
'''simple docstring'''
@wraps(snake_case )
def _inner_fn(*snake_case : Optional[int] , **snake_case : List[Any] ):
warnings.warn(
(f'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , snake_case , )
return fn(*snake_case , **snake_case )
return _inner_fn
| 85 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
__A : Optional[Any] = IFInpaintingPipeline
__A : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
__A : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__A : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def __lowercase ( self) -> Tuple:
'''simple docstring'''
return self._get_dummy_components()
def __lowercase ( self , lowercase , lowercase=0) -> Dict:
'''simple docstring'''
if str(a__).startswith('mps'):
a__ : List[Any] = torch.manual_seed(a__)
else:
a__ : Any = torch.Generator(device=a__).manual_seed(a__)
a__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__)).to(a__)
a__ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__)).to(a__)
a__ : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowercase ( self) -> Tuple:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def __lowercase ( self) -> Any:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1)
def __lowercase ( self) -> Any:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def __lowercase ( self) -> str:
'''simple docstring'''
self._test_save_load_local()
def __lowercase ( self) -> Dict:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 99 |
'''simple docstring'''
from __future__ import annotations
import requests
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(snake_case ).json()
def UpperCamelCase_( snake_case : int = 1_0 ):
'''simple docstring'''
snake_case_ = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
snake_case_ = requests.get(snake_case ).json()[:max_stories]
return [get_hackernews_story(snake_case ) for story_id in story_ids]
def UpperCamelCase_( snake_case : int = 1_0 ):
'''simple docstring'''
snake_case_ = hackernews_top_stories(snake_case )
return "\n".join("* [{title}]({url})".format(**snake_case ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 85 | 0 |
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
if number > 0:
raise ValueError('''input must be a negative integer''' )
A_ : List[str] = len(bin(SCREAMING_SNAKE_CASE )[3:] )
A_ : Dict = bin(abs(SCREAMING_SNAKE_CASE ) - (1 << binary_number_length) )[3:]
A_ : Union[str, Any] = (
(
'''1'''
+ '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 186 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _snake_case ( unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase_ : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
snake_case_ = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
snake_case_ = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
snake_case_ = text_generator("This is a test" , do_sample=a__ , num_return_sequences=2 , return_tensors=a__ )
self.assertEqual(
a__ , [
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
] , )
snake_case_ = text_generator.model.config.eos_token_id
snake_case_ = "<pad>"
snake_case_ = text_generator(
["This is a test", "This is a second test"] , do_sample=a__ , num_return_sequences=2 , batch_size=2 , return_tensors=a__ , )
self.assertEqual(
a__ , [
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
] , )
@require_tf
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
snake_case_ = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
snake_case_ = text_generator(["This is a test", "This is a second test"] , do_sample=a__ )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> str:
'''simple docstring'''
snake_case_ = TextGenerationPipeline(model=a__ , tokenizer=a__ )
return text_generator, ["This is a test", "Another test"]
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = "Hello I believe in"
snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
snake_case_ = text_generator(a__ )
self.assertEqual(
a__ , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
snake_case_ = text_generator(a__ , stop_sequence=" fe" )
self.assertEqual(a__ , [{"generated_text": "Hello I believe in fe"}] )
def lowerCAmelCase__ ( self , a__ , a__ ) -> Tuple:
'''simple docstring'''
snake_case_ = text_generator.model
snake_case_ = text_generator.tokenizer
snake_case_ = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
snake_case_ = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
snake_case_ = pipeline(task="text-generation" , model=a__ , tokenizer=a__ , return_full_text=a__ )
snake_case_ = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
snake_case_ = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
snake_case_ = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
snake_case_ = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_full_text=a__ , return_text=a__ )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_full_text=a__ , return_tensors=a__ )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_text=a__ , return_tensors=a__ )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
snake_case_ = text_generator("" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
snake_case_ = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
snake_case_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
snake_case_ = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(a__ ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
import torch
# Classic `model_kwargs`
snake_case_ = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
import torch
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
import torch
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=a__ , top_p=0.5 )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = "Hello world"
snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
snake_case_ = logging.get_logger("transformers.generation.tf_utils" )
else:
snake_case_ = logging.get_logger("transformers.generation.utils" )
snake_case_ = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_length=10 , max_new_tokens=1 )
self.assertIn(a__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_new_tokens=1 )
self.assertNotIn(a__ , cl.out )
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_length=10 )
self.assertNotIn(a__ , cl.out )
| 85 | 0 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def _UpperCamelCase ( __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Tuple:
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
UpperCamelCase__ = cst_fwd.get(__A , np.inf )
UpperCamelCase__ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
UpperCamelCase__ = new_cost_f
UpperCamelCase__ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
UpperCamelCase__ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def _UpperCamelCase ( __A , __A , __A , __A ) -> Any:
'''simple docstring'''
UpperCamelCase__ = -1
UpperCamelCase__ = set()
UpperCamelCase__ = set()
UpperCamelCase__ = {source: 0}
UpperCamelCase__ = {destination: 0}
UpperCamelCase__ = {source: None}
UpperCamelCase__ = {destination: None}
UpperCamelCase__ = PriorityQueue()
UpperCamelCase__ = PriorityQueue()
UpperCamelCase__ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
UpperCamelCase__ , UpperCamelCase__ = queue_forward.get()
visited_forward.add(__A )
UpperCamelCase__ , UpperCamelCase__ = queue_backward.get()
visited_backward.add(__A )
UpperCamelCase__ = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
UpperCamelCase__ = pass_and_relaxation(
__A , __A , __A , __A , __A , __A , __A , __A , __A , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
UpperCamelCase__ = shortest_distance
return shortest_path_distance
a__ : Tuple = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
a__ : Dict = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case ( unittest.TestCase , lowercase_ ):
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = load_tool("text-classification" )
self.tool.setup()
snake_case_ = load_tool("text-classification" , remote=a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
| 85 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: List[Any] ) -> Dict:
debug_launcher(test_script.main )
def A__ ( self: int ) -> Tuple:
debug_launcher(test_ops.main )
| 345 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Dict = "bridgetower_vision_model"
def __init__( self , a__=768 , a__=12 , a__=3 , a__=16 , a__=288 , a__=1 , a__=1e-05 , a__=False , a__=True , a__=False , **a__ , ) -> int:
'''simple docstring'''
super().__init__(**a__ )
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_channels
snake_case_ = patch_size
snake_case_ = image_size
snake_case_ = initializer_factor
snake_case_ = layer_norm_eps
snake_case_ = stop_gradient
snake_case_ = share_layernorm
snake_case_ = remove_last_layer
@classmethod
def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig":
'''simple docstring'''
snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ )
if config_dict.get("model_type" ) == "bridgetower":
snake_case_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a__ , **a__ )
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Any = "bridgetower_text_model"
def __init__( self , a__=50_265 , a__=768 , a__=12 , a__=12 , a__=1 , a__=3_072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=514 , a__=1 , a__=1e-05 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , **a__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**a__ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = initializer_factor
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = use_cache
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = eos_token_id
@classmethod
def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig":
'''simple docstring'''
snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ )
if config_dict.get("model_type" ) == "bridgetower":
snake_case_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a__ , **a__ )
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Union[str, Any] = "bridgetower"
def __init__( self , a__=True , a__="gelu" , a__=768 , a__=1 , a__=1e-05 , a__=False , a__="add" , a__=12 , a__=6 , a__=False , a__=False , a__=None , a__=None , **a__ , ) -> int:
'''simple docstring'''
snake_case_ = kwargs.pop("text_config_dict" , a__ )
snake_case_ = kwargs.pop("vision_config_dict" , a__ )
super().__init__(**a__ )
snake_case_ = share_cross_modal_transformer_layers
snake_case_ = hidden_act
snake_case_ = hidden_size
snake_case_ = initializer_factor
snake_case_ = layer_norm_eps
snake_case_ = share_link_tower_layers
snake_case_ = link_tower_type
snake_case_ = num_attention_heads
snake_case_ = num_hidden_layers
snake_case_ = tie_word_embeddings
snake_case_ = init_layernorm_from_vision_encoder
if text_config is None:
snake_case_ = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
snake_case_ = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
snake_case_ = BridgeTowerTextConfig(**a__ )
snake_case_ = BridgeTowerVisionConfig(**a__ )
@classmethod
def lowerCAmelCase__ ( cls , a__ , a__ , **a__ ) -> List[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.text_config.to_dict()
snake_case_ = self.vision_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 85 | 0 |
'''simple docstring'''
def lowerCamelCase ( __lowerCamelCase : int = 1000 ) ->Optional[int]:
_SCREAMING_SNAKE_CASE = -1
_SCREAMING_SNAKE_CASE = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
_SCREAMING_SNAKE_CASE = (n * n - 2 * a * n) // (2 * n - 2 * a)
_SCREAMING_SNAKE_CASE = n - a - b
if c * c == (a * a + b * b):
_SCREAMING_SNAKE_CASE = a * b * c
if candidate >= product:
_SCREAMING_SNAKE_CASE = candidate
return product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 58 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase_( snake_case : list[int] ):
'''simple docstring'''
return len(set(snake_case ) ) == len(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a : int = logging.get_logger(__name__)
a : Any = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _a ( lowercase_ , lowercase_ ):
A = "swin"
A = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(self, SCREAMING_SNAKE_CASE_=224, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=96, SCREAMING_SNAKE_CASE_=[2, 2, 6, 2], SCREAMING_SNAKE_CASE_=[3, 6, 12, 24], SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=4.0, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=1E-5, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(**a__ )
UpperCAmelCase_: Optional[Any] = image_size
UpperCAmelCase_: str = patch_size
UpperCAmelCase_: str = num_channels
UpperCAmelCase_: Optional[Any] = embed_dim
UpperCAmelCase_: Optional[Any] = depths
UpperCAmelCase_: List[str] = len(a__ )
UpperCAmelCase_: Any = num_heads
UpperCAmelCase_: int = window_size
UpperCAmelCase_: Union[str, Any] = mlp_ratio
UpperCAmelCase_: Optional[Any] = qkv_bias
UpperCAmelCase_: Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_: str = attention_probs_dropout_prob
UpperCAmelCase_: Any = drop_path_rate
UpperCAmelCase_: List[Any] = hidden_act
UpperCAmelCase_: Optional[Any] = use_absolute_embeddings
UpperCAmelCase_: Any = layer_norm_eps
UpperCAmelCase_: List[str] = initializer_range
UpperCAmelCase_: List[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_: Union[str, Any] = int(embed_dim * 2 ** (len(a__ ) - 1) )
UpperCAmelCase_: List[Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1, len(a__ ) + 1 )]
UpperCAmelCase_ , UpperCAmelCase_: List[Any] = get_aligned_output_features_output_indices(
out_features=a__, out_indices=a__, stage_names=self.stage_names )
class _a ( lowercase_ ):
A = version.parse('''1.11''' )
@property
def __snake_case (self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __snake_case (self ) -> float:
return 1E-4
| 147 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_SCREAMING_SNAKE_CASE : Any = False
try:
_SCREAMING_SNAKE_CASE : Optional[Any] = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class _snake_case :
def __init__( self , a__ = None , a__ = [] ) -> List[str]:
'''simple docstring'''
snake_case_ = 0
snake_case_ = choices
snake_case_ = prompt
if sys.platform == "win32":
snake_case_ = "*"
else:
snake_case_ = "➔ "
def lowerCAmelCase__ ( self , a__ , a__ = "" ) -> int:
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , a__ )
else:
forceWrite(self.choices[index] , a__ )
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(a__ )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def lowerCAmelCase__ ( self , a__ , a__ = 1 ) -> List[str]:
'''simple docstring'''
snake_case_ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(a__ )
move_cursor(a__ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(a__ )] for number in range(10 )] )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = int(chr(self.current_selection ) )
snake_case_ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , a__ )
else:
return
else:
return
def lowerCAmelCase__ ( self , a__ = 0 ) -> List[str]:
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
snake_case_ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(a__ )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
snake_case_ = int(builtins.input() )
except ValueError:
snake_case_ = default_choice
else:
snake_case_ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(a__ , "\n" )
return choice
| 85 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = data
A_ : Optional[int] = None
class UpperCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : Optional[Any] = None
A_ : List[Any] = None
def __iter__( self ):
"""simple docstring"""
A_ : Tuple = self.head
while self.head:
yield node.data
A_ : Optional[Any] = node.next
if node == self.head:
break
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join(str(a__ ) for item in iter(self ) )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
self.insert_nth(len(self ) , a__ )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
self.insert_nth(0 , a__ )
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
A_ : Optional[Any] = Node(a__ )
if self.head is None:
A_ : str = new_node # first node points itself
A_ : Optional[Any] = new_node
elif index == 0: # insert at head
A_ : Optional[Any] = self.head
A_ : Optional[Any] = new_node
else:
A_ : int = self.head
for _ in range(index - 1 ):
A_ : Union[str, Any] = temp.next
A_ : List[str] = temp.next
A_ : Optional[Any] = new_node
if index == len(self ) - 1: # insert at tail
A_ : Any = new_node
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowerCAmelCase_ ( self , lowercase = 0 ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
A_ : Any = self.head
if self.head == self.tail: # just one node
A_ : str = None
elif index == 0: # delete head node
A_ : Any = self.tail.next.next
A_ : Dict = self.head.next
else:
A_ : List[Any] = self.head
for _ in range(index - 1 ):
A_ : Optional[Any] = temp.next
A_ : List[str] = temp.next
A_ : List[str] = temp.next.next
if index == len(self ) - 1: # delete at tail
A_ : Tuple = temp
return delete_node.data
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return len(self ) == 0
def UpperCamelCase ( ):
'''simple docstring'''
A_ : int = CircularLinkedList()
assert len(__lowercase ) == 0
assert circular_linked_list.is_empty() is True
assert str(__lowercase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(__lowercase ) == i
circular_linked_list.insert_nth(__lowercase ,i + 1 )
assert str(__lowercase ) == "->".join(str(__lowercase ) for i in range(1 ,6 ) )
circular_linked_list.insert_tail(6 )
assert str(__lowercase ) == "->".join(str(__lowercase ) for i in range(1 ,7 ) )
circular_linked_list.insert_head(0 )
assert str(__lowercase ) == "->".join(str(__lowercase ) for i in range(0 ,7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(__lowercase ) == "->".join(str(__lowercase ) for i in range(1 ,6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 ,3 )
assert str(__lowercase ) == "->".join(str(__lowercase ) for i in range(1 ,6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 140 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase_( snake_case : Optional[int] ):
'''simple docstring'''
return EnvironmentCommand()
class _snake_case ( lowercase_ ):
@staticmethod
def lowerCAmelCase__ ( a__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ = parser.add_parser("env" )
download_parser.set_defaults(func=a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = huggingface_hub.__version__
snake_case_ = "not installed"
snake_case_ = "NA"
if is_torch_available():
import torch
snake_case_ = torch.__version__
snake_case_ = torch.cuda.is_available()
snake_case_ = "not installed"
if is_transformers_available():
import transformers
snake_case_ = transformers.__version__
snake_case_ = "not installed"
if is_accelerate_available():
import accelerate
snake_case_ = accelerate.__version__
snake_case_ = "not installed"
if is_xformers_available():
import xformers
snake_case_ = xformers.__version__
snake_case_ = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(a__ ) )
return info
@staticmethod
def lowerCAmelCase__ ( a__ ) -> str:
'''simple docstring'''
return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 85 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : List[Any] = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 91 |
'''simple docstring'''
import os
_SCREAMING_SNAKE_CASE : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = 0
snake_case_ = 0
while index < len(snake_case ) - 1:
snake_case_ = SYMBOLS[numerals[index]]
snake_case_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCamelCase_( snake_case : int ):
'''simple docstring'''
snake_case_ = ""
snake_case_ = num // 1_0_0_0
numerals += m_count * "M"
num %= 1_0_0_0
snake_case_ = num // 1_0_0
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
snake_case_ = num // 1_0
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCamelCase_( snake_case : str = "/p089_roman.txt" ):
'''simple docstring'''
snake_case_ = 0
with open(os.path.dirname(snake_case ) + roman_numerals_filename ) as filea:
snake_case_ = filea.readlines()
for line in lines:
snake_case_ = line.strip()
snake_case_ = parse_roman_numerals(snake_case )
snake_case_ = generate_roman_numerals(snake_case )
savings += len(snake_case ) - len(snake_case )
return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 85 | 0 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = "" ) -> str:
'''simple docstring'''
UpperCAmelCase = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
UpperCAmelCase = BeautifulSoup(requests.get(UpperCamelCase__ ).text , '''html.parser''' )
UpperCAmelCase = soup.find_all('''td''' , attrs='''titleColumn''' )
UpperCAmelCase = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(UpperCamelCase__ , UpperCamelCase__ )
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = "IMDb_Top_250_Movies.csv" ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = get_imdb_top_aaa_movies()
with open(UpperCamelCase__ , '''w''' , newline='''''' ) as out_file:
UpperCAmelCase = csv.writer(UpperCamelCase__ )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 273 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE : int = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ["CLIPFeatureExtractor"]
_SCREAMING_SNAKE_CASE : Dict = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 4_0_0 * 2**2_0, 6_0_0 * 2**2_0] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 1_0_0 * 2**2_0, 9_0_0 * 2**2_0] )
def __UpperCAmelCase ( A : List[str] , A : Optional[Any] , A : str ) -> List[Any]:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , A )
UpperCAmelCase_ : str = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCAmelCase_ : Optional[int] = dataset_size < in_memory_max_size
else:
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Dict = is_small_dataset(A )
assert result == expected
| 304 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
_SCREAMING_SNAKE_CASE : int = {
"gpt-neox-20b": 2048,
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : str = VOCAB_FILES_NAMES
lowerCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : str = ["input_ids", "attention_mask"]
def __init__( self , a__=None , a__=None , a__=None , a__="<|endoftext|>" , a__="<|endoftext|>" , a__="<|endoftext|>" , a__=False , **a__ , ) -> Tuple:
'''simple docstring'''
super().__init__(
a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , add_prefix_space=a__ , **a__ , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space:
snake_case_ = getattr(a__ , pre_tok_state.pop("type" ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**a__ )
snake_case_ = add_prefix_space
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
def lowerCAmelCase__ ( self , a__ ) -> List[int]:
'''simple docstring'''
snake_case_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] )
if len(a__ ) > self.model_max_length:
snake_case_ = input_ids[-self.model_max_length :]
return input_ids
| 85 | 0 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class __snake_case ( lowercase_ ):
a__ = "linear"
a__ = "cosine"
a__ = "cosine_with_restarts"
a__ = "polynomial"
a__ = "constant"
a__ = "constant_with_warmup"
a__ = "piecewise_constant"
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 ) ->Optional[Any]:
return LambdaLR(_SCREAMING_SNAKE_CASE , lambda _SCREAMING_SNAKE_CASE : 1 , last_epoch=_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 ) ->Tuple:
def lr_lambda(_SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1.0 , _SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 ) ->Tuple:
a__: Union[str, Any] = {}
a__: int = step_rules.split(',' )
for rule_str in rule_list[:-1]:
a__ , a__: Optional[int] = rule_str.split(':' )
a__: Union[str, Any] = int(_SCREAMING_SNAKE_CASE )
a__: Tuple = float(_SCREAMING_SNAKE_CASE )
a__: Any = value
a__: Dict = float(rule_list[-1] )
def create_rules_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
def rule_func(_SCREAMING_SNAKE_CASE ) -> float:
a__: Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
a__: Optional[int] = create_rules_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=-1 ) ->str:
def lr_lambda(_SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.5 , _SCREAMING_SNAKE_CASE = -1 ) ->Dict:
def lr_lambda(_SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
a__: Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = -1 ) ->Optional[Any]:
def lr_lambda(_SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
a__: List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1e-7 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=-1 ) ->Tuple:
a__: List[Any] = optimizer.defaults['lr']
if not (lr_init > lr_end):
raise ValueError(F'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' )
def lr_lambda(_SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
a__: int = lr_init - lr_end
a__: str = num_training_steps - num_warmup_steps
a__: Tuple = 1 - (current_step - num_warmup_steps) / decay_steps
a__: Any = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 1.0 , _SCREAMING_SNAKE_CASE = -1 , ) ->List[Any]:
a__: str = SchedulerType(_SCREAMING_SNAKE_CASE )
a__: Dict = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_SCREAMING_SNAKE_CASE , step_rules=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , num_cycles=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , power=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE , )
return schedule_func(
_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
| 290 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_( snake_case : Tuple ):
'''simple docstring'''
snake_case_ = FileLock(str(tmpdir / "foo.lock" ) )
snake_case_ = FileLock(str(tmpdir / "foo.lock" ) )
snake_case_ = 0.01
with locka.acquire():
with pytest.raises(snake_case ):
snake_case_ = time.time()
locka.acquire(snake_case )
assert time.time() - _start > timeout
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = "a" * 1_0_0_0 + ".lock"
snake_case_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(snake_case )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
snake_case_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(snake_case ):
locka.acquire(0 )
| 85 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class A__ :
"""simple docstring"""
def __init__( self , lowercase , lowercase=2 , lowercase=True , lowercase=False , lowercase=10 , lowercase=3 , lowercase=32 * 4 , lowercase=32 * 6 , lowercase=4 , lowercase=32 , ) -> List[Any]:
'''simple docstring'''
a__ : Tuple = parent
a__ : Tuple = batch_size
a__ : Optional[Any] = is_training
a__ : Optional[Any] = use_auxiliary_loss
a__ : Any = num_queries
a__ : Optional[int] = num_channels
a__ : Optional[Any] = min_size
a__ : Any = max_size
a__ : Optional[Any] = num_labels
a__ : Optional[Any] = mask_feature_size
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
a__)
a__ : Union[str, Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=a__)
a__ : Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=a__) > 0.5
).float()
a__ : str = (torch.rand((self.batch_size, self.num_labels) , device=a__) > 0.5).long()
a__ : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self) -> List[str]:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __lowercase ( self) -> str:
'''simple docstring'''
a__ , a__ , a__ , a__ , a__ : List[str] = self.prepare_config_and_inputs()
a__ : Optional[Any] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __lowercase ( self , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
a__ : Any = output.encoder_hidden_states
a__ : int = output.pixel_decoder_hidden_states
a__ : int = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(a__) , len(config.backbone_config.depths))
self.parent.assertTrue(len(a__) , len(config.backbone_config.depths))
self.parent.assertTrue(len(a__) , config.decoder_config.decoder_layers)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase=False) -> int:
'''simple docstring'''
with torch.no_grad():
a__ : Any = MaskFormerModel(config=a__)
model.to(a__)
model.eval()
a__ : int = model(pixel_values=a__ , pixel_mask=a__)
a__ : str = model(a__ , output_hidden_states=a__)
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(a__ , a__)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__ : Tuple = MaskFormerForInstanceSegmentation(config=a__)
model.to(a__)
model.eval()
def comm_check_on_output(lowercase):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
a__ : Union[str, Any] = model(pixel_values=a__ , pixel_mask=a__)
a__ : int = model(a__)
comm_check_on_output(a__)
a__ : Optional[int] = model(
pixel_values=a__ , pixel_mask=a__ , mask_labels=a__ , class_labels=a__)
comm_check_on_output(a__)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class A__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
__A : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__A : Dict = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__A : Dict = False
__A : List[Any] = False
__A : Optional[Any] = False
__A : List[str] = False
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Dict = MaskFormerModelTester(self)
a__ : List[Any] = ConfigTester(self , config_class=a__ , has_text_modality=a__)
def __lowercase ( self) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(a__ , **a__ , output_hidden_states=a__)
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*a__)
@unittest.skip(reason='MaskFormer does not use inputs_embeds')
def __lowercase ( self) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method')
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer is not a generative model')
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not use token embeddings')
def __lowercase ( self) -> str:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __lowercase ( self) -> List[str]:
'''simple docstring'''
pass
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ , a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Optional[Any] = model_class(a__)
a__ : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : str = [*signature.parameters.keys()]
a__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a__)
@slow
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
a__ : Optional[Any] = MaskFormerModel.from_pretrained(a__)
self.assertIsNotNone(a__)
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Optional[Any] = (self.model_tester.min_size,) * 2
a__ : Union[str, Any] = {
'pixel_values': torch.randn((2, 3, *size) , device=a__),
'mask_labels': torch.randn((2, 10, *size) , device=a__),
'class_labels': torch.zeros(2 , 10 , device=a__).long(),
}
a__ : Union[str, Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(a__)
a__ : Optional[int] = model(**a__)
self.assertTrue(outputs.loss is not None)
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ , a__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(a__ , **a__ , output_hidden_states=a__)
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[str] = model_class(a__).to(a__)
a__ : List[str] = model(**a__ , output_attentions=a__)
self.assertTrue(outputs.attentions is not None)
def __lowercase ( self) -> Dict:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
a__ : List[str] = self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ : List[str] = self.model_tester.prepare_config_and_inputs()
a__ : List[str] = model_class(a__)
model.to(a__)
model.train()
a__ : List[str] = model(a__ , mask_labels=a__ , class_labels=a__).loss
loss.backward()
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Any = self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs()
a__ : str = True
a__ : Optional[int] = True
a__ : Optional[Any] = model_class(a__)
model.to(a__)
model.train()
a__ : int = model(a__ , mask_labels=a__ , class_labels=a__)
a__ : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a__ : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
a__ : List[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a__ : Dict = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=a__)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
lowercase : int = 1e-4
def A_ ( ) -> str:
a__ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self) -> str:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
if is_vision_available()
else None
)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Union[str, Any] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(a__)
a__ : Any = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Dict = image_processor(a__ , return_tensors='pt').to(a__)
a__ : List[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(a__ , (1, 3, 800, 1088))
with torch.no_grad():
a__ : List[Any] = model(**a__)
a__ : Optional[Any] = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]]).to(a__)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , a__ , atol=a__))
a__ : int = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]]).to(a__)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , a__ , atol=a__))
a__ : Tuple = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]]).to(a__)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , a__ , atol=a__))
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : str = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(a__)
.eval()
)
a__ : Any = self.default_image_processor
a__ : Any = prepare_img()
a__ : List[Any] = image_processor(a__ , return_tensors='pt').to(a__)
a__ : Tuple = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(a__ , (1, 3, 800, 1088))
with torch.no_grad():
a__ : List[Any] = model(**a__)
# masks_queries_logits
a__ : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a__ : Optional[int] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
a__ : Optional[Any] = torch.tensor(a__).to(a__)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a__ , atol=a__))
# class_queries_logits
a__ : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
a__ : int = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
]).to(a__)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a__ , atol=a__))
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : str = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
.to(a__)
.eval()
)
a__ : List[Any] = self.default_image_processor
a__ : Dict = prepare_img()
a__ : int = image_processor(a__ , return_tensors='pt').to(a__)
a__ : List[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(a__ , (1, 3, 800, 1088))
with torch.no_grad():
a__ : List[Any] = model(**a__)
# masks_queries_logits
a__ : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a__ : Optional[int] = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.7711]]
a__ : Any = torch.tensor(a__).to(a__)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a__ , atol=a__))
# class_queries_logits
a__ : int = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
a__ : Any = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]]).to(a__)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a__ , atol=a__))
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : str = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(a__)
.eval()
)
a__ : Union[str, Any] = self.default_image_processor
a__ : Optional[Any] = image_processor(
[np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='pt' , )
a__ : List[Any] = inputs['pixel_values'].to(a__)
a__ : str = [el.to(a__) for el in inputs['mask_labels']]
a__ : int = [el.to(a__) for el in inputs['class_labels']]
with torch.no_grad():
a__ : str = model(**a__)
self.assertTrue(outputs.loss is not None)
| 99 |
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_SCREAMING_SNAKE_CASE : Any = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _snake_case ( datasets.BuilderConfig ):
lowerCAmelCase_ : Optional[datasets.Features] = None
def UpperCamelCase_( snake_case : "pyspark.sql.DataFrame" , snake_case : List[int] , ):
'''simple docstring'''
import pyspark
def generate_fn():
snake_case_ = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
snake_case_ = df_with_partition_id.select("*" ).where(f'part_id = {partition_id}' ).drop("part_id" )
snake_case_ = partition_df.collect()
snake_case_ = 0
for row in rows:
yield f'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class _snake_case ( _BaseExamplesIterable ):
def __init__( self , a__ , a__=None , ) -> Any:
'''simple docstring'''
snake_case_ = df
snake_case_ = partition_order or range(self.df.rdd.getNumPartitions() )
snake_case_ = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ) -> Union[str, Any]:
'''simple docstring'''
yield from self.generate_examples_fn()
def lowerCAmelCase__ ( self , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
def lowerCAmelCase__ ( self , a__ , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = self.split_shard_indices_by_worker(a__ , a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
@property
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.partition_order )
class _snake_case ( datasets.DatasetBuilder ):
lowerCAmelCase_ : Dict = SparkConfig
def __init__( self , a__ , a__ = None , a__ = None , **a__ , ) -> str:
'''simple docstring'''
import pyspark
snake_case_ = pyspark.sql.SparkSession.builder.getOrCreate()
snake_case_ = df
snake_case_ = working_dir
super().__init__(
cache_dir=a__ , config_name=str(self.df.semanticHash() ) , **a__ , )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
def create_cache_and_write_probe(a__ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a__ )
snake_case_ = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a__ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
snake_case_ = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self , a__ ) -> Optional[Any]:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCAmelCase__ ( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
import pyspark
def get_arrow_batch_size(a__ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
snake_case_ = self.df.count()
snake_case_ = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
snake_case_ = (
self.df.limit(a__ )
.repartition(1 )
.mapInArrow(a__ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
snake_case_ = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
snake_case_ = min(a__ , int(approx_total_size / max_shard_size ) )
snake_case_ = self.df.repartition(a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
'''simple docstring'''
import pyspark
snake_case_ = ParquetWriter if file_format == "parquet" else ArrowWriter
snake_case_ = os.path.join(self._working_dir , os.path.basename(a__ ) ) if self._working_dir else fpath
snake_case_ = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
snake_case_ = self.config.features
snake_case_ = self._writer_batch_size
snake_case_ = self._fs.storage_options
def write_arrow(a__ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
snake_case_ = pyspark.TaskContext().taskAttemptId()
snake_case_ = next(a__ , a__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
snake_case_ = 0
snake_case_ = writer_class(
features=a__ , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([first_batch] )
writer.write_table(a__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
snake_case_ = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([batch] )
writer.write_table(a__ )
if writer._num_bytes > 0:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a__ ) ):
snake_case_ = os.path.join(os.path.dirname(a__ ) , os.path.basename(a__ ) )
shutil.move(a__ , a__ )
snake_case_ = (
self.df.mapInArrow(a__ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCAmelCase__ ( self , a__ , a__ = "arrow" , a__ = None , a__ = None , **a__ , ) -> int:
'''simple docstring'''
self._validate_cache_dir()
snake_case_ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a__ )
snake_case_ = not is_remote_filesystem(self._fs )
snake_case_ = os.path.join if is_local else posixpath.join
snake_case_ = "-TTTTT-SSSSS-of-NNNNN"
snake_case_ = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
snake_case_ = path_join(self._output_dir , a__ )
snake_case_ = 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = []
snake_case_ = []
for task_id, content in self._prepare_split_single(a__ , a__ , a__ ):
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a__ )
snake_case_ = total_num_examples
snake_case_ = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
snake_case_ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
snake_case_ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a__ , a__ , a__ , ):
rename(
a__ , fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace("TTTTT-SSSSS" , F'{global_shard_id:05d}' ).replace("NNNNN" , F'{total_shards:05d}' ) , )
snake_case_ = []
snake_case_ = 0
for i in range(len(a__ ) ):
snake_case_ , snake_case_ = task_id_and_num_shards[i]
for shard_id in range(a__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a__ , len(a__ ) ).map(lambda a__ : _rename_shard(*a__ ) ).collect()
else:
# don't use any pattern
snake_case_ = 0
snake_case_ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace(a__ , "" ) , )
def lowerCAmelCase__ ( self , a__ , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 85 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.