"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of the vision encoder's attention
            # layers require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of the text encoder's attention
            # layers require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
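# Standalone illustration of the q/k/v split performed above (sizes are
# arbitrary): a fused projection of shape (3 * dim, dim) is sliced into three
# (dim, dim) matrices, in query/key/value order.
#   fused = torch.randn(3 * 4, 4)
#   q, k, v = fused[:4, :], fused[4:8, :], fused[-4:, :]
#   assert q.shape == k.shape == v.shape == (4, 4)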
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
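# Example invocation (script and checkpoint filenames are assumptions; point
# --checkpoint_path at whichever GroupViT checkpoint you downloaded):
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path ./group_vit_gcc_yfcc_30e-879422e0.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc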
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : List[str] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
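# The threading pattern exercised by these tests mirrors typical streaming
# usage outside of tests (a sketch; the tokenizer/model/input names are
# whatever you already have in scope):
#   streamer = TextIteratorStreamer(tokenizer)
#   Thread(target=model.generate, kwargs={"input_ids": input_ids, "max_new_tokens": 10, "streamer": streamer}).start()
#   for chunk in streamer:
#       print(chunk, end="", flush=True)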
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1, so it suffices to
    # test divisors of that form up to sqrt(number)
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f"{solution() = }")
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    # Γ(num) via the integral definition: the integral from 0 to infinity
    # of x^(num - 1) * e^(-x) dx
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
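# Example: for positive integers the integral reduces to Γ(n) = (n - 1)!,
# so gamma(5) should evaluate to roughly 24.0:
#   assert abs(gamma(5) - 24.0) < 1e-6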
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A , A=1_3 , A=3_0 , A=2 , A=3 , A=True , A=True , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=1_0 , A=0.02 , ) -> Dict:
_UpperCAmelCase : Union[str, Any] = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : Union[str, Any] = image_size
_UpperCAmelCase : Optional[int] = patch_size
_UpperCAmelCase : Union[str, Any] = num_channels
_UpperCAmelCase : List[str] = is_training
_UpperCAmelCase : List[Any] = use_labels
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : List[Any] = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : int = intermediate_size
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : Dict = attention_probs_dropout_prob
_UpperCAmelCase : Dict = type_sequence_label_size
_UpperCAmelCase : Union[str, Any] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase : List[str] = (image_size // patch_size) ** 2
_UpperCAmelCase : List[str] = num_patches + 1
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : str = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , )
return config, pixel_values
def __lowerCAmelCase ( self , A , A ) -> List[str]:
_UpperCAmelCase : str = FlaxViTModel(config=A )
_UpperCAmelCase : List[str] = model(A )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase : Tuple = (self.image_size, self.image_size)
_UpperCAmelCase : List[Any] = (self.patch_size, self.patch_size)
_UpperCAmelCase : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def __lowerCAmelCase ( self , A , A ) -> List[str]:
_UpperCAmelCase : Any = self.type_sequence_label_size
_UpperCAmelCase : Tuple = FlaxViTForImageClassification(config=A )
_UpperCAmelCase : List[Any] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase : Dict = 1
_UpperCAmelCase : List[str] = FlaxViTForImageClassification(A )
_UpperCAmelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : Union[str, Any] = model(A )
def __lowerCAmelCase ( self ) -> List[str]:
config_and_inputs = self.prepare_config_and_inputs()
(config, pixel_values) = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
a__ =(FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def __lowerCAmelCase ( self ) -> None:
_UpperCAmelCase : List[str] = FlaxViTModelTester(self )
_UpperCAmelCase : str = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=3_7 )
def __lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = model_class(A )
_UpperCAmelCase : Tuple = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : int = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : Dict = self._prepare_for_class(A , A )
_UpperCAmelCase : Union[str, Any] = model_class(A )
@jax.jit
def model_jitted(A , **A ):
return model(pixel_values=A , **A )
with self.subTest('''JIT Enabled''' ):
_UpperCAmelCase : List[str] = model_jitted(**A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCAmelCase : Tuple = model_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
_UpperCAmelCase : Tuple = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(A )
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
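# Example: 5 is 0b101 and 3 is 0b011, so their bitwise OR is 0b111:
#   assert binary_or(5, 3) == "0b111"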
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    # upper-case, strip non-letters, separate repeated letters with X's and
    # pad to an even length
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean
def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
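if __name__ == "__main__":
    # Minimal round-trip demo: with an empty key the table is just the plain
    # alphabet, so the digram "AB" (both letters in the top row) shifts one
    # column to the right under encryption.
    assert encode("AB", "") == "BC"
    assert decode("BC", "") == "AB"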
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
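# With this pattern `import transformers.models.vit_msn` stays cheap: the
# torch-dependent classes listed in `_import_structure` are only imported on
# first attribute access, e.g. (sketch):
#   from transformers.models.vit_msn import ViTMSNModel  # triggers the lazy import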
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is an augmenting path from source `s` to sink `t`
    # with remaining capacity; record the path in `parent`.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]
def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.

    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    # edges that are saturated in the residual graph form the minimum cut
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
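# For the capacity matrix above (the classic CLRS flow network), the maximum
# flow from node 0 to node 5 is 23; by the max-flow min-cut theorem the
# returned cut edges, (1, 3), (4, 3) and (4, 5), have capacities summing to
# 12 + 7 + 4 = 23.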
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
_UpperCAmelCase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
_UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
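# Example of the offset at work: sentencepiece assigns id 3 to "," (see the
# alignment table above), so its fairseq-aligned id is 3 + fairseq_offset = 4,
# while "<mask>" is appended at the very end (len(sp_model) + fairseq_offset).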
def __getstate__( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Any = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , A ) -> int:
_UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
"""simple docstring"""
import math
def sieve(n: int) -> list[int]:
    # segmented sieve of Eratosthenes: return all primes up to `n`
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
print(sieve(10**6))
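# Example: sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]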
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False,
        adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
# Load configuration defined in the metadata file
with open(UpperCamelCase__ ) as metadata_file:
_UpperCAmelCase : Dict = json.load(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# Load the entity vocab file
_UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
_UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
_UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
_UpperCAmelCase : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']]
_UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval()
_UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
_UpperCAmelCase : List[str] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCAmelCase : Dict = (39, 42)
_UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' )
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
_UpperCAmelCase : str = torch.Size((1, 42, 1024) )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) )
_UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCAmelCase : int = torch.Size((1, 1, 1024) )
_UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCAmelCase : List[str] = torch.Size((1, 1, 768) )
_UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Any = {}
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(UpperCamelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' )
_UpperCAmelCase : Tuple = index
return entity_vocab
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    # Return True if the regexes in qs match some window of the strings in ks.
    # Compile the regexes and force a complete match.
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
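# Sketch of intended usage (the model and inputs below are assumptions for
# illustration): build a flat {parameter path: PartitionSpec} mapping for a
# GPT-style parameter tree.
#   params = model.init(jax.random.PRNGKey(0), dummy_input)["params"]
#   param_specs = set_partitions(params)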
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
@lru_cache()
def bytes_to_unicode():
    # Map every possible byte to a printable unicode character, so BPE can
    # operate on strings without control or whitespace tokens.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
def __init__( self , A , A , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , **A , ) -> Union[str, Any]:
_UpperCAmelCase : str = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
_UpperCAmelCase : List[str] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
_UpperCAmelCase : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
_UpperCAmelCase : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
errors=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , add_prefix_space=A , **A , )
with open(A , encoding='''utf-8''' ) as vocab_handle:
_UpperCAmelCase : List[Any] = json.load(A )
_UpperCAmelCase : Dict = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase : Tuple = errors # how to handle errors in decoding
_UpperCAmelCase : List[str] = bytes_to_unicode()
_UpperCAmelCase : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(A , encoding='''utf-8''' ) as merges_handle:
_UpperCAmelCase : Any = merges_handle.read().split('''\n''' )[1:-1]
_UpperCAmelCase : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
_UpperCAmelCase : Union[str, Any] = dict(zip(A , range(len(A ) ) ) )
_UpperCAmelCase : List[Any] = {}
_UpperCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCAmelCase : Optional[Any] = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __lowerCAmelCase ( self ) -> str:
return len(self.encoder )
def __lowerCAmelCase ( self ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCAmelCase ( self , A ) -> Tuple:
if token in self.cache:
return self.cache[token]
_UpperCAmelCase : Optional[int] = tuple(A )
_UpperCAmelCase : List[str] = get_pairs(A )
if not pairs:
return token
while True:
_UpperCAmelCase : Optional[int] = min(A , key=lambda A : self.bpe_ranks.get(A , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase , _UpperCAmelCase : Tuple = bigram
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Optional[int] = 0
while i < len(A ):
try:
_UpperCAmelCase : Optional[Any] = word.index(A , A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCAmelCase : Optional[int] = j
if word[i] == first and i < len(A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase : Optional[Any] = tuple(A )
_UpperCAmelCase : int = new_word
if len(A ) == 1:
break
else:
_UpperCAmelCase : Any = get_pairs(A )
_UpperCAmelCase : List[Any] = ''' '''.join(A )
_UpperCAmelCase : int = word
return word
def __lowerCAmelCase ( self , A ) -> Optional[int]:
_UpperCAmelCase : int = []
for token in re.findall(self.pat , A ):
_UpperCAmelCase : Dict = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A ).split(''' ''' ) )
return bpe_tokens
def __lowerCAmelCase ( self , A ) -> str:
return self.encoder.get(A , self.encoder.get(self.unk_token ) )
def __lowerCAmelCase ( self , A ) -> str:
return self.decoder.get(A )
def __lowerCAmelCase ( self , A ) -> Optional[Any]:
_UpperCAmelCase : int = ''''''.join(A )
_UpperCAmelCase : str = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase : Optional[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A , ensure_ascii=A ) + '''\n''' )
_UpperCAmelCase : Dict = 0
with open(A , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A : A[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
_UpperCAmelCase : Dict = token_index
writer.write(''' '''.join(A ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : str = [self.cls_token_id]
_UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Optional[int] = [self.sep_token_id]
_UpperCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , A , A=False , **A ) -> int:
_UpperCAmelCase : List[Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A ) > 0 and not text[0].isspace()):
_UpperCAmelCase : str = ''' ''' + text
return (text, kwargs)
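# Illustrative standalone sketch (not the class above, whose names are mangled):
# one pass of the greedy BPE merge loop implemented by the bpe method, fusing the
# adjacent pair with the lowest merge rank. `apply_bpe_once` is a hypothetical name.
def apply_bpe_once(word, ranks):
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    if not pairs:
        return word
    first, second = min(pairs, key=lambda p: ranks.get(p, float('inf')))
    if (first, second) not in ranks:
        return word  # no known merge applies
    out, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            out.append(first + second)  # fuse the winning pair
            i += 2
        else:
            out.append(word[i])
            i += 1
    return tuple(out)
# e.g. apply_bpe_once(('l', 'o', 'w'), {('l', 'o'): 0}) returns ('lo', 'w')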
| 263 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
] , )
_UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
_UpperCAmelCase : Tuple = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
| 263 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer  # referenced below in the _LazyModule extra_objects
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast  # referenced below in the _LazyModule extra_objects
_lowerCAmelCase :List[Any] = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :List[Any] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Union[str, Any] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
_lowerCAmelCase :Tuple = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 263 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def lowerCamelCase_ (UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 2 ):
def get_dataset(UpperCamelCase__ : List[str] ):
_UpperCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(UpperCamelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=None ):
_UpperCAmelCase : Tuple = []
for epoch in range(UpperCamelCase__ ):
# Train quickly
model.train()
for batch in dataloader:
_UpperCAmelCase , _UpperCAmelCase : Dict = batch
_UpperCAmelCase : int = model(UpperCamelCase__ )
_UpperCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , UpperCamelCase__ )
accelerator.backward(UpperCamelCase__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> List[Any]:
super().__init__()
_UpperCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) )
_UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) )
def __lowerCAmelCase ( self , A ) -> Tuple:
return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
            # Save 11 states (with total_limit=2, only the last two checkpoints survive):
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 263 | 1 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int ):
_UpperCAmelCase : Optional[Any] = abs(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def lowerCamelCase_ (UpperCamelCase__ : int ):
_UpperCAmelCase : Dict = abs(UpperCamelCase__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def lowerCamelCase_ (UpperCamelCase__ : int ):
    return sum(int(c) for c in str(abs(UpperCamelCase__ ) ) )
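# Quick sanity check for the digit-sum variants above (illustrative):
# 262144 -> 2 + 6 + 2 + 1 + 4 + 4 = 19
assert sum(int(c) for c in str(262144)) == 19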
def lowerCamelCase_ ():
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase__ : Callable , UpperCamelCase__ : int ) -> None:
_UpperCAmelCase : Optional[int] = F'{func.__name__}({value})'
_UpperCAmelCase : Any = timeit(F'__main__.{call}' , setup='''import __main__''' )
print(F'{call:56} = {func(UpperCamelCase__ )} -- {timing:.4f} seconds' )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(UpperCamelCase__ , UpperCamelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :str = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
a__ =DiTPipeline
a__ =CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
a__ =PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
a__ =CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
a__ =False
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = TransformeraDModel(
sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=A , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_0_0_0 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=A , )
_UpperCAmelCase : Optional[Any] = AutoencoderKL()
_UpperCAmelCase : int = DDIMScheduler()
_UpperCAmelCase : Optional[int] = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def __lowerCAmelCase ( self , A , A=0 ) -> Optional[int]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Optional[Any] = torch.manual_seed(A )
else:
_UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : List[str] = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Union[str, Any] = '''cpu'''
_UpperCAmelCase : Dict = self.get_dummy_components()
_UpperCAmelCase : List[Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A )
_UpperCAmelCase : Tuple = pipe(**A ).images
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
_UpperCAmelCase : List[Any] = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
_UpperCAmelCase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1E-3 )
def __lowerCAmelCase ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=A , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
_UpperCAmelCase : Optional[Any] = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
_UpperCAmelCase : int = pipe.get_label_ids(A )
_UpperCAmelCase : List[Any] = pipe(A , generator=A , num_inference_steps=4_0 , output_type='''np''' ).images
for word, image in zip(A , A ):
_UpperCAmelCase : Union[str, Any] = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Dict = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
_UpperCAmelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
_UpperCAmelCase : int = ['''vase''', '''umbrella''']
_UpperCAmelCase : Union[str, Any] = pipe.get_label_ids(A )
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe(A , generator=A , num_inference_steps=2_5 , output_type='''np''' ).images
for word, image in zip(A , A ):
_UpperCAmelCase : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCAmelCase :str = logging.get_logger(__name__)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , *A , **A ) -> None:
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 263 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =IFImgaImgSuperResolutionPipeline
a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
a__ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __lowerCAmelCase ( self ) -> List[str]:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Any = torch.manual_seed(A )
else:
_UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 263 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''mgp-str'''
def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]:
super().__init__(**A )
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Dict = max_token_length
_UpperCAmelCase : Optional[Any] = num_character_labels
_UpperCAmelCase : int = num_bpe_labels
_UpperCAmelCase : List[str] = num_wordpiece_labels
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = mlp_ratio
_UpperCAmelCase : List[str] = distilled
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : str = drop_rate
_UpperCAmelCase : List[Any] = qkv_bias
_UpperCAmelCase : List[str] = attn_drop_rate
_UpperCAmelCase : Dict = drop_path_rate
_UpperCAmelCase : Union[str, Any] = output_aa_attentions
_UpperCAmelCase : List[str] = initializer_range
| 263 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or number < 0:
raise ValueError('''Input must be a non-negative integer''' )
_UpperCAmelCase : str = 0
while number:
        # Clearing the lowest set bit each iteration (n &= n - 1) jumps straight to
        # the next set bit, so the loop runs once per 1-bit rather than once per bit
        # position (e.g. at most 32 times for a 32-bit integer)
number &= number - 1
count += 1
return count
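# Worked example of the n &= n - 1 step above (Brian Kernighan's trick):
#   n     = 0b10110100
#   n - 1 = 0b10110011
#   n & (n - 1) = 0b10110000, i.e. the lowest set bit is cleared.
# 0b10110100 has four 1-bits, so the loop runs exactly four times.
assert bin(0b10110100).count('1') == 4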
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 1 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A = None , A = None , A=None , A=None ) -> Any:
if not conversation_id:
_UpperCAmelCase : Dict = uuid.uuida()
if past_user_inputs is None:
_UpperCAmelCase : Any = []
if generated_responses is None:
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : uuid.UUID = conversation_id
_UpperCAmelCase : List[str] = past_user_inputs
_UpperCAmelCase : List[str] = generated_responses
_UpperCAmelCase : Optional[str] = text
def __eq__( self , A ) -> List[Any]:
if not isinstance(A , A ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __lowerCAmelCase ( self , A , A = False ) -> List[Any]:
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".' )
_UpperCAmelCase : int = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
_UpperCAmelCase : Optional[int] = text
def __lowerCAmelCase ( self ) -> Tuple:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_UpperCAmelCase : List[Any] = None
def __lowerCAmelCase ( self , A ) -> Optional[Any]:
self.generated_responses.append(A )
def __lowerCAmelCase ( self ) -> Tuple:
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> Any:
_UpperCAmelCase : Union[str, Any] = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_UpperCAmelCase : List[str] = '''user''' if is_user else '''bot'''
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
a ,R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' ,)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , *A , **A ) -> str:
super().__init__(*A , **A )
if self.tokenizer.pad_token_id is None:
_UpperCAmelCase : Dict = self.tokenizer.eos_token
def __lowerCAmelCase ( self , A=None , A=None , A=None , **A ) -> Tuple:
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : Union[str, Any] = {}
_UpperCAmelCase : Dict = {}
if min_length_for_response is not None:
_UpperCAmelCase : Tuple = min_length_for_response
if minimum_tokens is not None:
_UpperCAmelCase : int = minimum_tokens
if "max_length" in generate_kwargs:
_UpperCAmelCase : Dict = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase : Optional[int] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A )
return preprocess_params, forward_params, postprocess_params
def __call__( self , A , A=0 , **A ) -> Optional[int]:
_UpperCAmelCase : Union[str, Any] = super().__call__(A , num_workers=A , **A )
if isinstance(A , A ) and len(A ) == 1:
return outputs[0]
return outputs
def __lowerCAmelCase ( self , A , A=3_2 ) -> Dict[str, Any]:
if not isinstance(A , A ):
            raise ValueError('''ConversationalPipeline expects a Conversation as input''' )
if conversation.new_user_input is None:
raise ValueError(
                f'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
_UpperCAmelCase : int = self.tokenizer._build_conversation_input_ids(A )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_UpperCAmelCase : Any = self._legacy_parse_and_tokenize(A )
if self.framework == "pt":
_UpperCAmelCase : str = torch.LongTensor([input_ids] )
elif self.framework == "tf":
_UpperCAmelCase : str = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __lowerCAmelCase ( self , A , A=1_0 , **A ) -> int:
_UpperCAmelCase : Optional[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
_UpperCAmelCase : int = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
_UpperCAmelCase : List[Any] = max_length - minimum_tokens
_UpperCAmelCase : str = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
_UpperCAmelCase : Dict = model_inputs['''attention_mask'''][:, -trim:]
_UpperCAmelCase : Optional[Any] = model_inputs.pop('''conversation''' )
_UpperCAmelCase : Optional[int] = max_length
_UpperCAmelCase : str = self.model.generate(**A , **A )
if self.model.config.is_encoder_decoder:
_UpperCAmelCase : Tuple = 1
else:
_UpperCAmelCase : Tuple = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __lowerCAmelCase ( self , A , A=True ) -> Union[str, Any]:
_UpperCAmelCase : str = model_outputs['''output_ids''']
_UpperCAmelCase : List[str] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
_UpperCAmelCase : List[str] = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(A )
return conversation
def __lowerCAmelCase ( self , A ) -> Dict:
_UpperCAmelCase : Tuple = self.tokenizer.eos_token_id
_UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A , add_special_tokens=A ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A , add_special_tokens=A ) )
if len(A ) > self.tokenizer.model_max_length:
_UpperCAmelCase : str = input_ids[-self.tokenizer.model_max_length :]
return input_ids
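# Hypothetical usage sketch for the pipeline above (assuming it is registered as the
# "conversational" task, as in transformers; the model name is only an example):
#   from transformers import pipeline, Conversation
#   chatbot = pipeline('conversational', model='microsoft/DialoGPT-small')
#   conversation = Conversation('Going to the movies tonight - any suggestions?')
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])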
| 263 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
_UpperCAmelCase : int = OmegaConf.load(UpperCamelCase__ )
_UpperCAmelCase : str = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model''']
_UpperCAmelCase : Optional[Any] = list(state_dict.keys() )
# extract state_dict for VQVAE
_UpperCAmelCase : Any = {}
_UpperCAmelCase : Any = '''first_stage_model.'''
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase : Dict = state_dict[key]
# extract state_dict for UNetLDM
_UpperCAmelCase : Tuple = {}
_UpperCAmelCase : int = '''model.diffusion_model.'''
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase : Dict = state_dict[key]
_UpperCAmelCase : List[str] = config.model.params.first_stage_config.params
_UpperCAmelCase : Union[str, Any] = config.model.params.unet_config.params
_UpperCAmelCase : Any = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : int = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_UpperCAmelCase : Optional[Any] = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
_lowerCAmelCase :List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 263 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase :int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :List[str] = logging.get_logger(__name__)
_lowerCAmelCase :Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''falcon'''
a__ =['''past_key_values''']
def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any:
_UpperCAmelCase : int = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A )
_UpperCAmelCase : int = hidden_size if n_embed is None else n_embed
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[int] = layer_norm_epsilon
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[int] = use_cache
_UpperCAmelCase : Any = hidden_dropout
_UpperCAmelCase : Dict = attention_dropout
_UpperCAmelCase : Any = bos_token_id
_UpperCAmelCase : List[Any] = eos_token_id
_UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase : Dict = alibi
_UpperCAmelCase : Optional[int] = new_decoder_architecture
_UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase : Optional[int] = parallel_attn
_UpperCAmelCase : Optional[int] = bias
super().__init__(bos_token_id=A , eos_token_id=A , **A )
@property
def __lowerCAmelCase ( self ) -> List[str]:
return self.hidden_size // self.num_attention_heads
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return not self.alibi
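# With the defaults above, the head-dim property evaluates to
# hidden_size // num_attention_heads = 4544 // 71 = 64, i.e. a per-head
# dimension of 64 for the falcon-7b-style defaults used here.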
| 263 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A = 6 ) -> None:
_UpperCAmelCase : Node | None = None
_UpperCAmelCase : Node | None = None
self.create_linked_list(A )
def __lowerCAmelCase ( self , A ) -> None:
_UpperCAmelCase : int = Node()
_UpperCAmelCase : Any = current_node
_UpperCAmelCase : Optional[Any] = current_node
_UpperCAmelCase : Tuple = current_node
for _ in range(1 , A ):
_UpperCAmelCase : List[str] = Node()
_UpperCAmelCase : List[str] = current_node
_UpperCAmelCase : List[str] = previous_node
_UpperCAmelCase : Dict = current_node
_UpperCAmelCase : int = self.front
_UpperCAmelCase : Optional[int] = previous_node
def __lowerCAmelCase ( self ) -> bool:
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def __lowerCAmelCase ( self ) -> Any | None:
self.check_can_perform_operation()
return self.front.data if self.front else None
def __lowerCAmelCase ( self , A ) -> None:
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
_UpperCAmelCase : int = self.rear.next
if self.rear:
_UpperCAmelCase : Dict = data
def __lowerCAmelCase ( self ) -> Any:
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
_UpperCAmelCase : int = self.front.data
_UpperCAmelCase : List[Any] = None
return data
_UpperCAmelCase : Optional[Any] = self.front
_UpperCAmelCase : Dict = old_front.next
_UpperCAmelCase : Dict = old_front.data
_UpperCAmelCase : List[str] = None
return data
def __lowerCAmelCase ( self ) -> None:
if self.is_empty():
raise Exception('''Empty Queue''' )
def __lowerCAmelCase ( self ) -> None:
if self.rear and self.rear.next == self.front:
raise Exception('''Full Queue''' )
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self ) -> None:
_UpperCAmelCase : Any | None = None
_UpperCAmelCase : Node | None = None
_UpperCAmelCase : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
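# Illustrative standalone sketch of the same fixed-capacity circular-queue idea,
# backed by a plain list instead of linked nodes (all names are hypothetical):
class RingBuffer:
    def __init__(self, capacity=6):
        self.data = [None] * capacity
        self.head = 0  # index of the oldest element
        self.size = 0
    def enqueue(self, item):
        if self.size == len(self.data):
            raise Exception('Full Queue')
        self.data[(self.head + self.size) % len(self.data)] = item
        self.size += 1
    def dequeue(self):
        if self.size == 0:
            raise Exception('Empty Queue')
        item, self.data[self.head] = self.data[self.head], None
        self.head = (self.head + 1) % len(self.data)
        self.size -= 1
        return item
# rb = RingBuffer(2); rb.enqueue('a'); rb.enqueue('b'); rb.dequeue()  # -> 'a'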
| 263 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_lowerCAmelCase :int = ['small', 'medium', 'large']
_lowerCAmelCase :int = 'lm_head.decoder.weight'
_lowerCAmelCase :Dict = 'lm_head.weight'
def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ):
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ )
_UpperCAmelCase : List[str] = d.pop(UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
if __name__ == "__main__":
_lowerCAmelCase :Dict = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
_lowerCAmelCase :str = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
_lowerCAmelCase :int = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 263 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase :int = logging.get_logger(__name__)
def lowerCamelCase_ (UpperCamelCase__ : Any ):
_UpperCAmelCase : Optional[int] = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
_UpperCAmelCase : Union[str, Any] = key.replace('''module.encoder''' , '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
_UpperCAmelCase : Dict = key.replace('''module.decoder''' , '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_UpperCAmelCase : Optional[Any] = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
_UpperCAmelCase : Dict = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(UpperCamelCase__ )-1}' )
if "norm" in key:
_UpperCAmelCase : Any = key.replace('''norm''' , '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_UpperCAmelCase : Dict = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
_UpperCAmelCase : Optional[int] = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(UpperCamelCase__ )-1}' )
if "layer_norm1" in key:
_UpperCAmelCase : int = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
_UpperCAmelCase : List[Any] = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
_UpperCAmelCase : Union[str, Any] = key[key.find('''block''' ) + len('''block''' )]
_UpperCAmelCase : Optional[int] = key.replace(F'block{idx}' , F'block.{int(UpperCamelCase__ )-1}' )
if "attn.q" in key:
_UpperCAmelCase : Optional[Any] = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
_UpperCAmelCase : Any = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
_UpperCAmelCase : Dict = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
_UpperCAmelCase : Tuple = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
_UpperCAmelCase : List[Any] = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
_UpperCAmelCase : List[str] = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
_UpperCAmelCase : Optional[Any] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
_UpperCAmelCase : Any = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_UpperCAmelCase : Dict = key[key.find('''linear_c''' ) + len('''linear_c''' )]
_UpperCAmelCase : List[str] = key.replace(F'linear_c{idx}' , F'linear_c.{int(UpperCamelCase__ )-1}' )
if "bot_conv" in key:
_UpperCAmelCase : List[Any] = key.replace('''bot_conv''' , '''0.convolution''' )
if "skip_conv1" in key:
_UpperCAmelCase : int = key.replace('''skip_conv1''' , '''1.convolution''' )
if "skip_conv2" in key:
_UpperCAmelCase : Optional[Any] = key.replace('''skip_conv2''' , '''2.convolution''' )
if "fusion1" in key:
_UpperCAmelCase : Tuple = key.replace('''fusion1''' , '''1.fusion''' )
if "fusion2" in key:
_UpperCAmelCase : Optional[Any] = key.replace('''fusion2''' , '''2.fusion''' )
if "fusion3" in key:
_UpperCAmelCase : int = key.replace('''fusion3''' , '''3.fusion''' )
if "fusion" in key and "conv" in key:
_UpperCAmelCase : Union[str, Any] = key.replace('''conv''' , '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
_UpperCAmelCase : int = key.replace('''module.last_layer_depth''' , '''head.head''' )
_UpperCAmelCase : Tuple = value
return new_state_dict
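# Example trace through the renaming function above (illustrative):
#   'module.encoder.patch_embed1.norm.weight'
#     -> 'glpn.encoder.patch_embeddings.0.layer_norm.weight'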
def lowerCamelCase_ (UpperCamelCase__ : Dict , UpperCamelCase__ : Any ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_UpperCAmelCase : Optional[int] = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
_UpperCAmelCase : Tuple = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
_UpperCAmelCase : Dict = kv_weight[
: config.hidden_sizes[i], :
]
_UpperCAmelCase : Optional[Any] = kv_bias[: config.hidden_sizes[i]]
_UpperCAmelCase : str = kv_weight[
config.hidden_sizes[i] :, :
]
_UpperCAmelCase : Tuple = kv_bias[config.hidden_sizes[i] :]
def lowerCamelCase_ ():
_UpperCAmelCase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCAmelCase : int = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return image
@torch.no_grad()
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Optional[int]=None ):
_UpperCAmelCase : Union[str, Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_UpperCAmelCase : List[str] = GLPNImageProcessor()
# prepare image
_UpperCAmelCase : Tuple = prepare_img()
_UpperCAmelCase : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
_UpperCAmelCase : Any = torch.load(UpperCamelCase__ , map_location=torch.device('''cpu''' ) )
# rename keys
_UpperCAmelCase : Tuple = rename_keys(UpperCamelCase__ )
# key and value matrices need special treatment
read_in_k_v(UpperCamelCase__ , UpperCamelCase__ )
# create HuggingFace model and load state dict
_UpperCAmelCase : str = GLPNForDepthEstimation(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# forward pass
_UpperCAmelCase : List[str] = model(UpperCamelCase__ )
_UpperCAmelCase : Dict = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_UpperCAmelCase : int = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
_UpperCAmelCase : List[Any] = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F'Unknown model name: {model_name}' )
_UpperCAmelCase : Union[str, Any] = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCamelCase__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCamelCase__ , )
if __name__ == "__main__":
_lowerCAmelCase :str = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
_lowerCAmelCase :Union[str, Any] = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 263 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
_lowerCAmelCase :Tuple = tuple[int, int]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A , A ) -> None:
_UpperCAmelCase : set[int] = vertices
_UpperCAmelCase : dict[EdgeT, int] = {
(min(A ), max(A )): weight for edge, weight in edges.items()
}
def __lowerCAmelCase ( self , A , A ) -> None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
_UpperCAmelCase : List[Any] = weight
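    # Prim's algorithm: grow a spanning subgraph from one vertex by repeatedly
    # adding the cheapest edge with exactly one endpoint inside the subgraph
    # (the XOR test below), until all vertices are covered.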
def __lowerCAmelCase ( self ) -> Graph:
_UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} )
_UpperCAmelCase : EdgeT
_UpperCAmelCase : int
_UpperCAmelCase : EdgeT
_UpperCAmelCase : int
while len(subgraph.vertices ) < len(self.vertices ):
_UpperCAmelCase : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_UpperCAmelCase : Tuple = edge
_UpperCAmelCase : Optional[int] = weight
subgraph.add_edge(A , A )
return subgraph
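# Project Euler 107: the maximum saving is the total weight of all edges in the
# network minus the weight of its minimum spanning tree.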
def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ):
_UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) )
_UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : dict[EdgeT, int] = {}
_UpperCAmelCase : list[str]
_UpperCAmelCase : int
_UpperCAmelCase : int
with open(UpperCamelCase__ ) as f:
_UpperCAmelCase : str = f.read().strip().split('''\n''' )
_UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data]
    for edgea in range(1 , len(UpperCamelCase__ ) ):
        for edgeb in range(edgea ):
            if adjacency_matrix[edgea][edgeb] != "-":
                _UpperCAmelCase : Optional[Any] = int(adjacency_matrix[edgea][edgeb] )
_UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ )
_UpperCAmelCase : Graph = graph.prims_algorithm()
_UpperCAmelCase : int = sum(graph.edges.values() )
_UpperCAmelCase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 1 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
def count_of_possible_combinations(UpperCamelCase__ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(UpperCamelCase__ )
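# Top-down variant: dp_array memoizes the count for each remaining target,
# bringing the cost down to O(target * len(array)).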
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
def count_of_possible_combinations_with_dp_array(
UpperCamelCase__ : int , UpperCamelCase__ : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_UpperCAmelCase : Dict = sum(
count_of_possible_combinations_with_dp_array(target - item , UpperCamelCase__ )
for item in array )
_UpperCAmelCase : Union[str, Any] = answer
return answer
_UpperCAmelCase : int = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(UpperCamelCase__ , UpperCamelCase__ )
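# Bottom-up variant: dp_array[i] counts the ordered ways to form sum i, with
# dp_array[0] = 1 for the empty combination. The sample call at the bottom
# (target=5, array=[1, 2, 5]) prints 9.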
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
_UpperCAmelCase : Union[str, Any] = [0] * (target + 1)
_UpperCAmelCase : int = 1
for i in range(1 , target + 1 ):
for j in range(UpperCamelCase__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase :Tuple = 3
_lowerCAmelCase :Union[str, Any] = 5
_lowerCAmelCase :Optional[int] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''mgp-str'''
def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]:
super().__init__(**A )
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Dict = max_token_length
_UpperCAmelCase : Optional[Any] = num_character_labels
_UpperCAmelCase : int = num_bpe_labels
_UpperCAmelCase : List[str] = num_wordpiece_labels
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = mlp_ratio
_UpperCAmelCase : List[str] = distilled
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : str = drop_rate
_UpperCAmelCase : List[Any] = qkv_bias
_UpperCAmelCase : List[str] = attn_drop_rate
_UpperCAmelCase : Dict = drop_path_rate
_UpperCAmelCase : Union[str, Any] = output_aa_attentions
_UpperCAmelCase : List[str] = initializer_range
| 263 | 1 |
"""simple docstring"""
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self ) -> List[str]:
_UpperCAmelCase : Dict = ''''''
_UpperCAmelCase : Dict = ''''''
_UpperCAmelCase : int = []
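    # Top-down edit distance: dp[m][n] caches the minimum number of insert,
    # remove, or replace operations needed to match the two words' prefixes;
    # -1 marks an entry that has not been computed yet.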
def __lowerCAmelCase ( self , A , A ) -> int:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
_UpperCAmelCase : Optional[int] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
_UpperCAmelCase : Optional[int] = self.__min_dist_top_down_dp(A , n - 1 )
_UpperCAmelCase : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , A )
_UpperCAmelCase : Any = self.__min_dist_top_down_dp(m - 1 , n - 1 )
_UpperCAmelCase : Tuple = 1 + min(A , A , A )
return self.dp[m][n]
def __lowerCAmelCase ( self , A , A ) -> int:
_UpperCAmelCase : List[Any] = worda
_UpperCAmelCase : Dict = worda
_UpperCAmelCase : Tuple = [[-1 for _ in range(len(A ) )] for _ in range(len(A ) )]
return self.__min_dist_top_down_dp(len(A ) - 1 , len(A ) - 1 )
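    # Bottom-up edit distance: fill an (m+1) x (n+1) table where row 0 and
    # column 0 hold the cost of matching against an empty string.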
def __lowerCAmelCase ( self , A , A ) -> int:
_UpperCAmelCase : Optional[int] = worda
_UpperCAmelCase : Dict = worda
_UpperCAmelCase : Union[str, Any] = len(A )
_UpperCAmelCase : List[str] = len(A )
_UpperCAmelCase : Tuple = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
_UpperCAmelCase : List[str] = j
elif j == 0: # second string is empty
_UpperCAmelCase : Dict = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
_UpperCAmelCase : Optional[Any] = self.dp[i - 1][j - 1]
else:
_UpperCAmelCase : Any = self.dp[i][j - 1]
_UpperCAmelCase : Union[str, Any] = self.dp[i - 1][j]
_UpperCAmelCase : int = self.dp[i - 1][j - 1]
_UpperCAmelCase : int = 1 + min(A , A , A )
return self.dp[m][n]
if __name__ == "__main__":
_lowerCAmelCase :int = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
_lowerCAmelCase :str = input('Enter the first string: ').strip()
_lowerCAmelCase :Any = input('Enter the second string: ').strip()
print()
print(f"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}")
print(f"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}")
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 263 |
"""simple docstring"""
from __future__ import annotations
import math
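# Minimax over an implicit complete binary tree: the children of node_index
# live at node_index * 2 and node_index * 2 + 1, and leaves (depth == height)
# return their score directly. For the sample scores in main() the optimal
# value works out to 65.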
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : bool , UpperCamelCase__ : list[int] , UpperCamelCase__ : float ):
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(UpperCamelCase__ ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , )
def lowerCamelCase_ ():
_UpperCAmelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423]
_UpperCAmelCase : Any = math.log(len(UpperCamelCase__ ) , 2 )
print('''Optimal value : ''' , end='''''' )
print(minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 | 1 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCAmelCase :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
_lowerCAmelCase :List[str] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
_lowerCAmelCase :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
_lowerCAmelCase :Union[str, Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
_lowerCAmelCase :Dict = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
_lowerCAmelCase :Union[str, Any] = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
_lowerCAmelCase :Any = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
_lowerCAmelCase :int = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
_lowerCAmelCase :Dict = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a__ =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
a__ =DPRContextEncoderTokenizer
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a__ =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a__ =DPRQuestionEncoderTokenizer
_lowerCAmelCase :Optional[Any] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
_lowerCAmelCase :Optional[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
_lowerCAmelCase :Optional[Any] = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n                Controls the maximum length to use by one of the truncation/padding parameters.\n\n                If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n                is required by one of the truncation/padding parameters. If the model has no specific maximum input\n                length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n                If set, will return tensors instead of list of python integers. Acceptable values are:\n\n                - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n                - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n                - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Return:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(a )
class _UpperCAmelCase :
'''simple docstring'''
def __call__( self , A , A = None , A = None , A = False , A = False , A = None , A = None , A = None , **A , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , )
elif titles is None or texts is None:
_UpperCAmelCase : int = titles if texts is None else texts
return super().__call__(
A , A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , )
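        # Reader path: tokenize question+title and passage text separately, then
        # concatenate the id sequences (truncating to max_length if requested)
        # and rebuild the attention mask by hand.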
_UpperCAmelCase : Optional[Any] = titles if not isinstance(A , A ) else [titles]
_UpperCAmelCase : int = texts if not isinstance(A , A ) else [texts]
_UpperCAmelCase : List[str] = len(A )
_UpperCAmelCase : List[str] = questions if not isinstance(A , A ) else [questions] * n_passages
assert len(A ) == len(
        A ), f'There should be as many titles as texts but got {len(A )} titles and {len(A )} texts.'
_UpperCAmelCase : Optional[Any] = super().__call__(A , A , padding=A , truncation=A )['''input_ids''']
_UpperCAmelCase : Union[str, Any] = super().__call__(A , add_special_tokens=A , padding=A , truncation=A )['''input_ids''']
_UpperCAmelCase : Any = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(A , A )
]
}
if return_attention_mask is not False:
_UpperCAmelCase : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_UpperCAmelCase : str = attention_mask
return self.pad(A , padding=A , max_length=A , return_tensors=A )
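    # Rank passages by their relevance logit, then pull the best non-overlapping
    # answer spans out of each passage until num_spans predictions are collected.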
def __lowerCAmelCase ( self , A , A , A = 1_6 , A = 6_4 , A = 4 , ) -> List[DPRSpanPrediction]:
_UpperCAmelCase : List[Any] = reader_input['''input_ids''']
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = reader_output[:3]
_UpperCAmelCase : Dict = len(A )
_UpperCAmelCase : str = sorted(range(A ) , reverse=A , key=relevance_logits.__getitem__ )
_UpperCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_UpperCAmelCase : str = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_UpperCAmelCase : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_UpperCAmelCase : int = sequence_ids.index(self.pad_token_id )
else:
_UpperCAmelCase : Any = len(A )
_UpperCAmelCase : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=A , top_spans=A , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=A , start_index=A , end_index=A , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
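    # Score every candidate span of length <= max_answer_length as
    # start_logit + end_logit, sort descending, and keep spans that do not
    # overlap any previously chosen span.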
def __lowerCAmelCase ( self , A , A , A , A , ) -> List[DPRSpanPrediction]:
_UpperCAmelCase : List[str] = []
for start_index, start_score in enumerate(A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_UpperCAmelCase : int = sorted(A , key=lambda A : x[1] , reverse=A )
_UpperCAmelCase : Dict = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
_UpperCAmelCase : Any = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a )
class _UpperCAmelCase ( a ,a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =READER_PRETRAINED_VOCAB_FILES_MAP
a__ =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =READER_PRETRAINED_INIT_CONFIGURATION
a__ =['''input_ids''', '''attention_mask''']
a__ =DPRReaderTokenizer
| 263 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
_UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = generator.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = '''cyberpunk 2077'''
_UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger '''
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.text_to_image(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
_UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 263 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''openai/whisper-base'''
a__ =(
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
a__ ='''transcriber'''
a__ =WhisperProcessor
a__ =WhisperForConditionalGeneration
a__ =['''audio''']
a__ =['''text''']
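    # The tool runs in three stages: encode raw audio into input features,
    # generate token ids with the Whisper model, then decode them back to text.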
def __lowerCAmelCase ( self , A ) -> Dict:
return self.pre_processor(A , return_tensors='''pt''' ).input_features
def __lowerCAmelCase ( self , A ) -> Tuple:
return self.model.generate(inputs=A )
def __lowerCAmelCase ( self , A ) -> Any:
return self.pre_processor.batch_decode(A , skip_special_tokens=A )[0]
| 263 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCAmelCase :Any = False
@skip_mps
class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =StableDiffusionAttendAndExcitePipeline
a__ =False
a__ =TEXT_TO_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __lowerCAmelCase ( cls ) -> List[str]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , )
_UpperCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_UpperCAmelCase : List[str] = CLIPTextModel(A )
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : List[str] = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[str] = '''cpu'''
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : Dict = self.get_dummy_inputs(A )
_UpperCAmelCase : Union[str, Any] = pipe(**A ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
_UpperCAmelCase : int = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
_UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
        # NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> str:
super().test_save_load_local(expected_max_difference=5E-4 )
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = torch.manual_seed(5_1 )
_UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
_UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses'''
_UpperCAmelCase : int = [5, 7]
_UpperCAmelCase : Dict = pipe(
prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 263 | 1 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_lowerCAmelCase :List[Any] = logging.getLogger(__name__)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , A=-1 ) -> Optional[Any]:
        # in NER datasets, the last column is usually reserved for the NER label
_UpperCAmelCase : Any = label_idx
def __lowerCAmelCase ( self , A , A ) -> List[InputExample]:
if isinstance(A , A ):
_UpperCAmelCase : Union[str, Any] = mode.value
_UpperCAmelCase : Any = os.path.join(A , f'{mode}.txt' )
_UpperCAmelCase : Optional[int] = 1
_UpperCAmelCase : List[str] = []
with open(A , encoding='''utf-8''' ) as f:
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Optional[Any] = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=A , labels=A ) )
guid_index += 1
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Tuple = []
else:
_UpperCAmelCase : Any = line.split(''' ''' )
words.append(splits[0] )
if len(A ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=A , labels=A ) )
return examples
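    # Write predictions alongside the original tokens, consuming one predicted
    # label per token; tokens beyond the model's maximum length get a warning
    # instead of a prediction.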
def __lowerCAmelCase ( self , A , A , A ) -> Optional[int]:
_UpperCAmelCase : Dict = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(A )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_UpperCAmelCase : Optional[Any] = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(A )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def __lowerCAmelCase ( self , A ) -> List[str]:
if path:
with open(A , '''r''' ) as f:
_UpperCAmelCase : Optional[int] = f.read().splitlines()
if "O" not in labels:
_UpperCAmelCase : List[Any] = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self ) -> Any:
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def __lowerCAmelCase ( self , A ) -> List[str]:
if path:
with open(A , '''r''' ) as f:
_UpperCAmelCase : Union[str, Any] = f.read().splitlines()
if "O" not in labels:
_UpperCAmelCase : Optional[int] = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __lowerCAmelCase ( self , A , A ) -> List[InputExample]:
if isinstance(A , A ):
_UpperCAmelCase : Dict = mode.value
_UpperCAmelCase : Optional[int] = os.path.join(A , f'{mode}.txt' )
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : Dict = []
with open(A , encoding='''utf-8''' ) as f:
for sentence in parse_incr(A ):
_UpperCAmelCase : int = []
_UpperCAmelCase : Tuple = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(A ) == len(A )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=A , labels=A ) )
guid_index += 1
return examples
def __lowerCAmelCase ( self , A , A , A ) -> List[Any]:
_UpperCAmelCase : Optional[int] = 0
for sentence in parse_incr(A ):
_UpperCAmelCase : Dict = preds_list[example_id]
_UpperCAmelCase : Dict = ''''''
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(A )
example_id += 1
def __lowerCAmelCase ( self , A ) -> List[str]:
if path:
with open(A , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 263 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : List[str] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
        # The streamer will time out after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 263 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase :List[str] = logging.get_logger(__name__)
_lowerCAmelCase :Dict = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''distilbert'''
a__ ={
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , A=3_0_5_2_2 , A=5_1_2 , A=False , A=6 , A=1_2 , A=7_6_8 , A=4 * 7_6_8 , A=0.1 , A=0.1 , A="gelu" , A=0.02 , A=0.1 , A=0.2 , A=0 , **A , ) -> Dict:
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : List[Any] = max_position_embeddings
_UpperCAmelCase : Tuple = sinusoidal_pos_embds
_UpperCAmelCase : int = n_layers
_UpperCAmelCase : List[str] = n_heads
_UpperCAmelCase : Dict = dim
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Optional[int] = dropout
_UpperCAmelCase : Dict = attention_dropout
_UpperCAmelCase : List[Any] = activation
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Dict = qa_dropout
_UpperCAmelCase : str = seq_classif_dropout
super().__init__(**A , pad_token_id=A )
class _UpperCAmelCase ( a ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCAmelCase : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 263 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
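# Gamma function via its integral definition: integrate x**(z - 1) * exp(-x)
# over x from 0 to infinity using scipy's quad.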
def lowerCamelCase_ (UpperCamelCase__ : float ):
if num <= 0:
raise ValueError('''math domain error''' )
return quad(UpperCamelCase__ , 0 , UpperCamelCase__ , args=(UpperCamelCase__) )[0]
def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ):
return math.pow(UpperCamelCase__ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 263 | 1 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowerCAmelCase :str = object()
# For specifying empty leaf dict `{}`
_lowerCAmelCase :str = object()
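# Return True when the tuple of regex patterns matches a contiguous
# sub-sequence of a flattened parameter-key tuple.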
def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ):
_UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def lowerCamelCase_ ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
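# Map every flattened parameter key to a PartitionSpec via the rules above;
# the assertion below catches any key that no rule matched.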
def lowerCamelCase_ (UpperCamelCase__ : str ):
_UpperCAmelCase : List[str] = _get_partition_rules()
_UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
| 263 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase : List[str] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b"
_UpperCAmelCase : str = str(bin(UpperCamelCase__ ) )[2:]
_UpperCAmelCase : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
_lowerCAmelCase :str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
_lowerCAmelCase :list[int] = [ord(letter) for letter in string.ascii_lowercase]
_lowerCAmelCase :set[int] = {ord(char) for char in VALID_CHARS}
_lowerCAmelCase :list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowerCamelCase_ (UpperCamelCase__ : list[int] , UpperCamelCase__ : tuple[int, ...] ):
_UpperCAmelCase : str = ""
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
for keychar, cipherchar in zip(cycle(UpperCamelCase__ ) , UpperCamelCase__ ):
_UpperCAmelCase : List[str] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(UpperCamelCase__ )
return decoded
def lowerCamelCase_ (UpperCamelCase__ : list[int] ):
_UpperCAmelCase : list[str] = []
for key in product(UpperCamelCase__ , repeat=3 ):
_UpperCAmelCase : Optional[int] = try_key(UpperCamelCase__ , UpperCamelCase__ )
if encoded is not None:
possibles.append(UpperCamelCase__ )
return possibles
def lowerCamelCase_ (UpperCamelCase__ : list[str] , UpperCamelCase__ : str ):
return [possible for possible in possibles if common_word in possible.lower()]
def lowerCamelCase_ (UpperCamelCase__ : str = "p059_cipher.txt" ):
_UpperCAmelCase : list[int]
_UpperCAmelCase : list[str]
_UpperCAmelCase : str
_UpperCAmelCase : str
_UpperCAmelCase : str = Path(UpperCamelCase__ ).parent.joinpath(UpperCamelCase__ ).read_text(encoding='''utf-8''' )
_UpperCAmelCase : List[str] = [int(UpperCamelCase__ ) for number in data.strip().split(''',''' )]
_UpperCAmelCase : Optional[Any] = filter_valid_chars(UpperCamelCase__ )
for common_word in COMMON_WORDS:
_UpperCAmelCase : Union[str, Any] = filter_common_word(UpperCamelCase__ , UpperCamelCase__ )
if len(UpperCamelCase__ ) == 1:
break
_UpperCAmelCase : Optional[int] = possibles[0]
return sum(ord(UpperCamelCase__ ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase :int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
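# Lazy-import pattern: the torch-backed model classes are only registered when
# torch is available, so importing this module never pulls in torch eagerly.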
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 1 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=True , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=4 , A="gelu" , A=0.0 , A=0.1 , A=True , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> List[str]:
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : Any = seq_length
_UpperCAmelCase : List[Any] = is_training
_UpperCAmelCase : List[str] = use_input_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : List[Any] = use_labels
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : str = num_attention_heads
_UpperCAmelCase : List[str] = intermediate_multiple_size
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Optional[int] = hidden_dropout
_UpperCAmelCase : Dict = attention_dropout
_UpperCAmelCase : List[str] = weight_tying
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : Optional[int] = type_vocab_size
_UpperCAmelCase : Tuple = type_sequence_label_size
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : Optional[int] = num_labels
_UpperCAmelCase : Dict = num_choices
_UpperCAmelCase : List[str] = scope
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : List[Any] = None
if self.use_input_mask:
_UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Dict = None
if self.use_labels:
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : str = True
return config, input_ids, input_mask, token_labels
def __lowerCAmelCase ( self , A , A , A ) -> int:
_UpperCAmelCase : Optional[Any] = GPTNeoXJapaneseModel(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Dict = model(A , attention_mask=A )
_UpperCAmelCase : Union[str, Any] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , A , A , A ) -> Optional[Any]:
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : Any = GPTNeoXJapaneseModel(A )
model.to(A )
model.eval()
_UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , A , A , A , A ) -> str:
_UpperCAmelCase : str = GPTNeoXJapaneseForCausalLM(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Any = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , A , A , A ) -> str:
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Dict = GPTNeoXJapaneseForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
_UpperCAmelCase : Any = model(A , attention_mask=A , use_cache=A )
_UpperCAmelCase : Tuple = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_UpperCAmelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to next_input_ids and the attention mask
_UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
_UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A )
_UpperCAmelCase : Tuple = output_from_no_past['''hidden_states'''][0]
_UpperCAmelCase : Optional[int] = model(
A , attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
_UpperCAmelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = config_and_inputs
_UpperCAmelCase : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =(GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
a__ =(GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
a__ =(
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
a__ =False
a__ =False
a__ =False
a__ =False
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : Tuple = GPTNeoXJapaneseModelTester(self )
_UpperCAmelCase : Tuple = ConfigTester(self , config_class=A , hidden_size=3_7 )
def __lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A , A , A )
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def __lowerCAmelCase ( self ) -> str:
# This regression test was failing with PyTorch < 1.3
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
_UpperCAmelCase : Union[str, Any] = None
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*A )
@slow
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[Any] = '''abeja/gpt-neox-japanese-2.7b'''
_UpperCAmelCase : str = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
_UpperCAmelCase : Any = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
_UpperCAmelCase : str = GPTNeoXJapaneseTokenizer.from_pretrained(A )
_UpperCAmelCase : Any = GPTNeoXJapaneseForCausalLM.from_pretrained(A )
_UpperCAmelCase : Tuple = []
for prompt in prompts:
_UpperCAmelCase : str = tokenizer(A , return_tensors='''pt''' ).input_ids
_UpperCAmelCase : List[Any] = model.generate(A , max_length=5_0 )
_UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(A , skip_special_tokens=A )
predicted_outputs += generated_string
self.assertListEqual(A , A )
| 263 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase :List[str] = '▁'
_lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCAmelCase :List[Any] = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
_lowerCAmelCase :Tuple = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
_UpperCAmelCase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
_UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Any = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , A ) -> int:
_UpperCAmelCase : str = ''''''.join(A ).replace('''▁''' , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
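# Illustrative sketch, not part of the original file: how the fairseq offset
# described in the alignment table in __init__ maps raw SentencePiece ids to
# model ids. The helper name and concrete ids below are assumptions chosen to
# mirror that table.
def _demo_fairseq_alignment(spm_id: int, fairseq_offset: int = 1, unk_token_id: int = 3) -> int:
    # spm id 0 is '<unk>', which lives at fairseq position 3; every other
    # piece id is shifted up by the offset
    return spm_id + fairseq_offset if spm_id else unk_token_id

assert _demo_fairseq_alignment(0) == 3  # '<unk>'
assert _demo_fairseq_alignment(3) == 4  # ',', the first "real" token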
| 263 | 1 |
"""simple docstring"""
import heapq
def lowerCamelCase_ (UpperCamelCase__ : dict ):
_UpperCAmelCase : list[list] = []
# for each node and its adjacency list, add them and the node's rank to the queue
# using the heapq module, the queue is filled like a priority queue
# heapq implements a min-heap, so -1 * len(value) is used to get max-heap behavior
for key, value in graph.items():
# O(log(n))
heapq.heappush(UpperCamelCase__ , [-1 * len(UpperCamelCase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
_UpperCAmelCase : Optional[Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the negated rank of the highest-ranked node)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_UpperCAmelCase : Dict = heapq.heappop(UpperCamelCase__ )[1][0]
chosen_vertices.add(UpperCamelCase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem,
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
_UpperCAmelCase : Tuple = elem[1][1].index(UpperCamelCase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(UpperCamelCase__ )
return chosen_vertices
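# Standalone sketch of the negated-rank trick used above: heapq is a min-heap,
# so storing -degree makes the highest-degree vertex pop out first.
_demo_heap: list[list] = []
heapq.heappush(_demo_heap, [-3, ("a", [1, 2, 3])])
heapq.heappush(_demo_heap, [-1, ("b", [0])])
assert heapq.heappop(_demo_heap)[1][0] == "a"  # degree-3 vertex comes first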
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase :Union[str, Any] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 263 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , *A , **A ) -> None:
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 263 | 1 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase :Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_lowerCAmelCase :Tuple = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_lowerCAmelCase :str = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Dict:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __lowerCAmelCase ( self , A , A , A = CHRF.CHAR_ORDER , A = CHRF.WORD_ORDER , A = CHRF.BETA , A = False , A = False , A = False , ) -> Union[str, Any]:
_UpperCAmelCase : Dict = len(references[0] )
if any(len(A ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_UpperCAmelCase : Any = [[refs[i] for refs in references] for i in range(A )]
_UpperCAmelCase : Union[str, Any] = CHRF(A , A , A , A , A , A )
_UpperCAmelCase : Any = sb_chrf.corpus_score(A , A )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 263 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
# Load configuration defined in the metadata file
with open(UpperCamelCase__ ) as metadata_file:
_UpperCAmelCase : Dict = json.load(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# Load the entity vocab file
_UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
_UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
_UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
_UpperCAmelCase : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']]
_UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval()
_UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
_UpperCAmelCase : List[str] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCAmelCase : Dict = (39, 42)
_UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' )
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
_UpperCAmelCase : str = torch.Size((1, 42, 1024) )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) )
_UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCAmelCase : int = torch.Size((1, 1, 1024) )
_UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCAmelCase : List[str] = torch.Size((1, 1, 768) )
_UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Any = {}
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(UpperCamelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' )
_UpperCAmelCase : Tuple = index
return entity_vocab
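# Sketch of the entity_vocab.tsv layout parsed above (entries are illustrative;
# the real file ships with the original LUKE checkpoint). Each line holds two
# tab-separated fields; the loader keeps the first field as the entity title
# and uses the line index as its id:
#     [PAD]\t0
#     [UNK]\t1
#     [MASK]\t2
#     Ana Ivanovic\t3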
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowerCAmelCase :Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 263 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase : Dict = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Any = self.dummy_uncond_unet
_UpperCAmelCase : int = PNDMScheduler()
_UpperCAmelCase : Tuple = PNDMPipeline(unet=A , scheduler=A )
pndm.to(A )
pndm.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = pndm(generator=A , num_inference_steps=2_0 , output_type='''numpy''' ).images
_UpperCAmelCase : List[Any] = torch.manual_seed(0 )
_UpperCAmelCase : Any = pndm(generator=A , num_inference_steps=2_0 , output_type='''numpy''' , return_dict=A )[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_UpperCAmelCase : Optional[int] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : int = '''google/ddpm-cifar10-32'''
_UpperCAmelCase : Tuple = UNetaDModel.from_pretrained(A )
_UpperCAmelCase : Any = PNDMScheduler()
_UpperCAmelCase : int = PNDMPipeline(unet=A , scheduler=A )
pndm.to(A )
pndm.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[Any] = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pndm(generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 263 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowerCAmelCase :str = object()
# For specifying empty leaf dict `{}`
_lowerCAmelCase :str = object()
def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ):
_UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_UpperCAmelCase : str = [x.match(y ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
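# Illustrative example for the window-matching helper above: the compiled
# query patterns must match a contiguous window of the key tuple, e.g.
#   qs = ("mlp", "c_fc", "kernel")
#   ks = ("transformer", "h", "0", "mlp", "c_fc", "kernel")  -> True
#   ks = ("transformer", "wte", "embedding")                 -> False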
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def lowerCamelCase_ ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowerCamelCase_ (UpperCamelCase__ : str ):
_UpperCAmelCase : List[str] = _get_partition_rules()
_UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
| 263 | 1 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
_lowerCAmelCase :Tuple = HfArgumentParser(InitializationArguments)
_lowerCAmelCase :List[str] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
_lowerCAmelCase :int = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_lowerCAmelCase :str = {
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
_lowerCAmelCase :int = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
_lowerCAmelCase :Optional[Any] = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
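# Example invocation (sketch; the script file name is hypothetical, but the
# flags follow from the InitializationArguments fields referenced above):
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-init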
| 263 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio clip of a dog
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
] , )
_UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
_UpperCAmelCase : Tuple = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
| 263 | 1 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int = 100 ):
_UpperCAmelCase : str = (n * (n + 1) // 2) ** 2
_UpperCAmelCase : Tuple = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
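# The closed forms used above are the standard identities
#   1 + 2 + ... + n       = n(n + 1) / 2
#   1^2 + 2^2 + ... + n^2 = n(n + 1)(2n + 1) / 6
# e.g. for n = 10: 55**2 - 385 = 3025 - 385 = 2640.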
if __name__ == "__main__":
print(f"{solution() = }")
| 263 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def lowerCamelCase_ (UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 2 ):
def get_dataset(UpperCamelCase__ : List[str] ):
_UpperCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(UpperCamelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=None ):
_UpperCAmelCase : Tuple = []
for epoch in range(UpperCamelCase__ ):
# Train quickly
model.train()
for batch in dataloader:
_UpperCAmelCase , _UpperCAmelCase : Dict = batch
_UpperCAmelCase : int = model(UpperCamelCase__ )
_UpperCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , UpperCamelCase__ )
accelerator.backward(UpperCamelCase__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> List[Any]:
super().__init__()
_UpperCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) )
_UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) )
def __lowerCAmelCase ( self , A ) -> Tuple:
return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 263 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCamelCase_ (UpperCamelCase__ : Dict="" ):
_UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
return os.path.join(UpperCamelCase__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Tuple = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
_UpperCAmelCase : int = AgentAudio(A )
_UpperCAmelCase : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(A , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(A ) )
# Ensure that the file contains the same value as the original tensor
_UpperCAmelCase , _UpperCAmelCase : List[str] = sf.read(A )
self.assertTrue(torch.allclose(A , torch.tensor(A ) , atol=1E-4 ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Dict = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
_UpperCAmelCase : Optional[int] = get_new_path(suffix='''.wav''' )
sf.write(A , A , 1_6_0_0_0 )
_UpperCAmelCase : List[Any] = AgentAudio(A )
self.assertTrue(torch.allclose(A , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , A )
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
_UpperCAmelCase : str = AgentImage(A )
_UpperCAmelCase : Union[str, Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(A , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(A ) )
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : Union[str, Any] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
_UpperCAmelCase : Dict = Image.open(A )
_UpperCAmelCase : Optional[int] = AgentImage(A )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(A ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
_UpperCAmelCase : List[str] = Image.open(A )
_UpperCAmelCase : Union[str, Any] = AgentImage(A )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(A ) )
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : List[Any] = '''Hey!'''
_UpperCAmelCase : Any = AgentText(A )
self.assertEqual(A , agent_type.to_string() )
self.assertEqual(A , agent_type.to_raw() )
self.assertEqual(A , A )
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :str = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
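# Usage note (sketch): with the _LazyModule pattern above, importing this
# package is cheap; heavy dependencies are only pulled in on first attribute
# access, e.g. an import like the one below is what triggers the torch-backed module:
#   from transformers.models.squeezebert import SqueezeBertModel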
| 263 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ (UpperCamelCase__ : list[int] ):
if not nums:
return 0
_UpperCAmelCase : int = nums[0]
_UpperCAmelCase : Union[str, Any] = 0
for num in nums[1:]:
_UpperCAmelCase , _UpperCAmelCase : Any = (
max_excluding + num,
max(UpperCamelCase__ , UpperCamelCase__ ),
)
return max(UpperCamelCase__ , UpperCamelCase__ )
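# Illustrative trace, assuming the function computes the maximum sum over
# non-adjacent elements: for nums = [3, 2, 5, 10, 7] the rolling
# (include, exclude) pair evolves (3, 0) -> (2, 3) -> (8, 3) -> (13, 8) -> (15, 13),
# so the answer is max(15, 13) = 15, i.e. picking 3 + 5 + 7.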
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_lowerCAmelCase :int = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1_000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
_lowerCAmelCase :Dict = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1_000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
_lowerCAmelCase :int = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
_lowerCAmelCase :Dict = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
_lowerCAmelCase :int = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
_lowerCAmelCase :str = {
'num_train_timesteps': 151,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('''boolean value expected''' )
def convert_resnet (checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
    new_checkpoint[F'{new_prefix}.norm1.weight'] = checkpoint[F'{old_prefix}.in_layers.0.weight']
    new_checkpoint[F'{new_prefix}.norm1.bias'] = checkpoint[F'{old_prefix}.in_layers.0.bias']
    new_checkpoint[F'{new_prefix}.conv1.weight'] = checkpoint[F'{old_prefix}.in_layers.2.weight']
    new_checkpoint[F'{new_prefix}.conv1.bias'] = checkpoint[F'{old_prefix}.in_layers.2.bias']
    new_checkpoint[F'{new_prefix}.time_emb_proj.weight'] = checkpoint[F'{old_prefix}.emb_layers.1.weight']
    new_checkpoint[F'{new_prefix}.time_emb_proj.bias'] = checkpoint[F'{old_prefix}.emb_layers.1.bias']
    new_checkpoint[F'{new_prefix}.norm2.weight'] = checkpoint[F'{old_prefix}.out_layers.0.weight']
    new_checkpoint[F'{new_prefix}.norm2.bias'] = checkpoint[F'{old_prefix}.out_layers.0.bias']
    new_checkpoint[F'{new_prefix}.conv2.weight'] = checkpoint[F'{old_prefix}.out_layers.3.weight']
    new_checkpoint[F'{new_prefix}.conv2.bias'] = checkpoint[F'{old_prefix}.out_layers.3.bias']
    if has_skip:
        new_checkpoint[F'{new_prefix}.conv_shortcut.weight'] = checkpoint[F'{old_prefix}.skip_connection.weight']
        new_checkpoint[F'{new_prefix}.conv_shortcut.bias'] = checkpoint[F'{old_prefix}.skip_connection.bias']
    return new_checkpoint
def convert_attention (checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ):
    weight_q , weight_k , weight_v = checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 , dim=0 )
    new_checkpoint[F'{new_prefix}.group_norm.weight'] = checkpoint[F'{old_prefix}.norm.weight']
    new_checkpoint[F'{new_prefix}.group_norm.bias'] = checkpoint[F'{old_prefix}.norm.bias']
    new_checkpoint[F'{new_prefix}.to_q.weight'] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_q.bias'] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_k.weight'] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_k.bias'] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_v.weight'] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_v.bias'] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_out.0.weight'] = (
        checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F'{new_prefix}.to_out.0.bias'] = checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
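# A minimal sketch of the qkv handling above (shape values are illustrative):
# the consistency-model checkpoint stores attention projections as 1x1
# convolutions, so each chunk is squeezed down to a plain linear weight.
#
#   qkv = torch.randn(3 * 64, 64, 1, 1)     # fused q/k/v conv weight, C = 64
#   w_q, w_k, w_v = qkv.chunk(3, dim=0)     # three (64, 64, 1, 1) tensors
#   w_q = w_q.squeeze(-1).squeeze(-1)       # (64, 64) linear weight for diffusers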
def con_pt_to_diffuser (checkpoint_path : str , unet_config ):
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    new_checkpoint = {}
    new_checkpoint['''time_embedding.linear_1.weight'''] = checkpoint['''time_embed.0.weight''']
    new_checkpoint['''time_embedding.linear_1.bias'''] = checkpoint['''time_embed.0.bias''']
    new_checkpoint['''time_embedding.linear_2.weight'''] = checkpoint['''time_embed.2.weight''']
    new_checkpoint['''time_embedding.linear_2.bias'''] = checkpoint['''time_embed.2.bias''']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['''class_embedding.weight'''] = checkpoint['''label_emb.weight''']
    new_checkpoint['''conv_in.weight'''] = checkpoint['''input_blocks.0.0.weight''']
    new_checkpoint['''conv_in.bias'''] = checkpoint['''input_blocks.0.0.bias''']
    down_block_types = unet_config['''down_block_types''']
    layers_per_block = unet_config['''layers_per_block''']
    attention_head_dim = unet_config['''attention_head_dim''']
    channels_list = unet_config['''block_out_channels''']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'down_blocks.{i}.resnets.{j}'
                old_prefix = F'input_blocks.{current_layer}.0'
                has_skip = j == 0 and downsample_block_has_skip
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'down_blocks.{i}.resnets.{j}'
                old_prefix = F'input_blocks.{current_layer}.0'
                has_skip = j == 0 and downsample_block_has_skip
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = F'down_blocks.{i}.attentions.{j}'
                old_prefix = F'input_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = F'down_blocks.{i}.downsamplers.0'
            old_prefix = F'input_blocks.{current_layer}.0'
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = '''mid_block.resnets.0'''
    old_prefix = '''middle_block.0'''
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = '''mid_block.attentions.0'''
    old_prefix = '''middle_block.1'''
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = '''mid_block.resnets.1'''
    old_prefix = '''middle_block.2'''
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    current_layer = 0
    up_block_types = unet_config['''up_block_types''']
    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'up_blocks.{i}.resnets.{j}'
                old_prefix = F'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'up_blocks.{i}.upsamplers.0'
                old_prefix = F'output_blocks.{current_layer-1}.1'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'up_blocks.{i}.resnets.{j}'
                old_prefix = F'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                new_prefix = F'up_blocks.{i}.attentions.{j}'
                old_prefix = F'output_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'up_blocks.{i}.upsamplers.0'
                old_prefix = F'output_blocks.{current_layer-1}.2'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_checkpoint['''conv_norm_out.weight'''] = checkpoint['''out.0.weight''']
    new_checkpoint['''conv_norm_out.bias'''] = checkpoint['''out.0.bias''']
    new_checkpoint['''conv_out.weight'''] = checkpoint['''out.2.weight''']
    new_checkpoint['''conv_out.bias'''] = checkpoint['''out.2.bias''']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
    )
    parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config['num_class_embeds'] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
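# Consuming the converted pipeline (a minimal sketch; the path and class label
# are illustrative, and a CUDA device is assumed to be available):
#
#   from diffusers import ConsistencyModelPipeline
#   pipe = ConsistencyModelPipeline.from_pretrained("./converted").to("cuda")
#   image = pipe(num_inference_steps=1, class_labels=0).images[0]  # one-step sampling
#   image.save("sample.png")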
| 263 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components ( self ):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs ( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass ( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components ( self ):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_float16 ( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass ( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local ( self ):
        self._test_save_load_local()
    def test_inference_batch_single_identical ( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
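# To exercise this class, a plain `pytest` invocation runs the fast tests;
# the slow integration tests additionally require RUN_SLOW=1 in the
# environment (the file path below is illustrative):
#
#   pytest tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py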
| 263 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase :Union[str, Any] = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w (h : int , w : int , scale_factor : int = 8 ):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
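# Worked example for the helper above with the default scale factor of 8:
# h = w = 768 gives 768 // 64 = 12 with no remainder, so it returns (96, 96);
# the 96x96 latent grid decodes back to 768x768 through the movq.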
class KandinskyPipeline ( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , text_encoder , tokenizer , unet , scheduler , movq , ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Dict:
if latents is None:
_UpperCAmelCase : Dict = randn_tensor(A , generator=A , device=A , dtype=A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_UpperCAmelCase : Dict = latents.to(A )
_UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def __lowerCAmelCase ( self , A , A , A , A , A=None , ) -> List[Any]:
_UpperCAmelCase : Optional[Any] = len(A ) if isinstance(A , A ) else 1
# get prompt text embeddings
_UpperCAmelCase : List[str] = self.tokenizer(
A , padding='''max_length''' , truncation=A , max_length=7_7 , return_attention_mask=A , add_special_tokens=A , return_tensors='''pt''' , )
_UpperCAmelCase : str = text_inputs.input_ids
_UpperCAmelCase : Any = self.tokenizer(A , padding='''longest''' , return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(A , A ):
_UpperCAmelCase : Tuple = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
_UpperCAmelCase : List[str] = text_input_ids.to(A )
_UpperCAmelCase : int = text_inputs.attention_mask.to(A )
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.text_encoder(
input_ids=A , attention_mask=A )
_UpperCAmelCase : Tuple = prompt_embeds.repeat_interleave(A , dim=0 )
_UpperCAmelCase : Union[str, Any] = text_encoder_hidden_states.repeat_interleave(A , dim=0 )
_UpperCAmelCase : Any = text_mask.repeat_interleave(A , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase : List[str]
if negative_prompt is None:
_UpperCAmelCase : List[Any] = [''''''] * batch_size
elif type(A ) is not type(A ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='
f' {type(A )}.' )
elif isinstance(A , A ):
_UpperCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
''' the batch size of `prompt`.''' )
else:
_UpperCAmelCase : Optional[int] = negative_prompt
_UpperCAmelCase : List[Any] = self.tokenizer(
A , padding='''max_length''' , max_length=7_7 , truncation=A , return_attention_mask=A , add_special_tokens=A , return_tensors='''pt''' , )
_UpperCAmelCase : Tuple = uncond_input.input_ids.to(A )
_UpperCAmelCase : int = uncond_input.attention_mask.to(A )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.text_encoder(
input_ids=A , attention_mask=A )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_UpperCAmelCase : Any = negative_prompt_embeds.shape[1]
_UpperCAmelCase : Optional[int] = negative_prompt_embeds.repeat(1 , A )
_UpperCAmelCase : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A )
_UpperCAmelCase : List[Any] = uncond_text_encoder_hidden_states.shape[1]
_UpperCAmelCase : Any = uncond_text_encoder_hidden_states.repeat(1 , A , 1 )
_UpperCAmelCase : Union[str, Any] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , A , -1 )
_UpperCAmelCase : Optional[int] = uncond_text_mask.repeat_interleave(A , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase : Any = torch.cat([negative_prompt_embeds, prompt_embeds] )
_UpperCAmelCase : Any = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
_UpperCAmelCase : Tuple = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCAmelCase ( self , A=0 ) -> List[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_UpperCAmelCase : List[str] = torch.device(f'cuda:{gpu_id}' )
_UpperCAmelCase : Optional[int] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A , A )
def __lowerCAmelCase ( self , A=0 ) -> Tuple:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_UpperCAmelCase : Dict = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_UpperCAmelCase : Optional[int] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
_UpperCAmelCase , _UpperCAmelCase : Dict = cpu_offload_with_hook(A , A , prev_module_hook=A )
if self.safety_checker is not None:
_UpperCAmelCase , _UpperCAmelCase : int = cpu_offload_with_hook(self.safety_checker , A , prev_module_hook=A )
# We'll offload the last model manually.
_UpperCAmelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCAmelCase ( self ) -> List[Any]:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A )
def __call__( self , A , A , A , A = None , A = 5_1_2 , A = 5_1_2 , A = 1_0_0 , A = 4.0 , A = 1 , A = None , A = None , A = "pil" , A = True , ) -> List[str]:
if isinstance(A , A ):
_UpperCAmelCase : Optional[int] = 1
elif isinstance(A , A ):
_UpperCAmelCase : Tuple = len(A )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(A )}' )
_UpperCAmelCase : str = self._execution_device
_UpperCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
_UpperCAmelCase : List[Any] = guidance_scale > 1.0
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = self._encode_prompt(
A , A , A , A , A )
if isinstance(A , A ):
_UpperCAmelCase : Any = torch.cat(A , dim=0 )
if isinstance(A , A ):
_UpperCAmelCase : Optional[int] = torch.cat(A , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase : int = image_embeds.repeat_interleave(A , dim=0 )
_UpperCAmelCase : List[str] = negative_image_embeds.repeat_interleave(A , dim=0 )
_UpperCAmelCase : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=A )
self.scheduler.set_timesteps(A , device=A )
_UpperCAmelCase : int = self.scheduler.timesteps
_UpperCAmelCase : List[Any] = self.unet.config.in_channels
_UpperCAmelCase , _UpperCAmelCase : List[Any] = get_new_h_w(A , A , self.movq_scale_factor )
# create initial latent
_UpperCAmelCase : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , A , A , A , self.scheduler , )
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase : List[str] = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
_UpperCAmelCase : List[str] = self.unet(
sample=A , timestep=A , encoder_hidden_states=A , added_cond_kwargs=A , return_dict=A , )[0]
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_UpperCAmelCase , _UpperCAmelCase : List[str] = noise_pred.chunk(2 )
_UpperCAmelCase , _UpperCAmelCase : List[str] = variance_pred.chunk(2 )
_UpperCAmelCase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_UpperCAmelCase : List[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : List[Any] = self.scheduler.step(
A , A , A , generator=A , ).prev_sample
# post-processing
_UpperCAmelCase : Optional[Any] = self.movq.decode(A , force_not_quantize=A )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_UpperCAmelCase : Union[str, Any] = image * 0.5 + 0.5
_UpperCAmelCase : Tuple = image.clamp(0 , 1 )
_UpperCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase : List[str] = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
| 263 |
"""simple docstring"""
def lowerCamelCase_ (number : int ):
    """
    Count set bits with Kernighan's trick: ``number &= number - 1`` clears the
    lowest set bit, so the loop runs once per ``1`` bit.

    >>> lowerCamelCase_(25)
    3
    >>> lowerCamelCase_(0)
    0
    """
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
while number:
        # `number &= number - 1` clears the lowest set bit, so the loop runs
        # once per set bit instead of once per bit position.
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_longformer_fast'] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longformer'] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_longformer'] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
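# A minimal sketch of what the lazy pattern above buys at runtime (the timing
# is the point, not the exact attribute):
#
#   import transformers.models.longformer as longformer  # cheap: no torch/tf yet
#   cls = longformer.LongformerModel  # first access triggers import of modeling_longformer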
| 263 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original (checkpoint_path , config_path , output_path ):
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , '' )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , '' )] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , noise_scheduler )
    pipeline.save_pretrained(output_path )
if __name__ == "__main__":
_lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
_lowerCAmelCase :List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
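# Loading the converted checkpoint afterwards (a minimal sketch; the path is
# whatever was passed as --output_path, shown here as an illustrative value):
#
#   from diffusers import LDMPipeline
#   pipe = LDMPipeline.from_pretrained("./ldm-converted")
#   image = pipe(num_inference_steps=50).images[0]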
| 263 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator (length : int = 8 ):
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator (chars_incl : str , i : int ):
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random (chars : str , i : int ):
    return "".join(secrets.choice(chars ) for _ in range(i ) )
def random_number (chars_incl , i ):
    pass # Put your code here...
def random_letters (chars_incl , i ):
    pass # Put your code here...
def random_characters (chars_incl , i ):
    pass # Put your code here...
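# The three stubs above are left as an exercise upstream; a minimal sketch of
# what they could look like (illustrative, mirroring the generic `random`
# helper but restricted to one character class each):
#
#   def random_number(chars_incl, i):
#       return "".join(secrets.choice(digits) for _ in range(i))
#   def random_letters(chars_incl, i):
#       return "".join(secrets.choice(ascii_letters) for _ in range(i))
#   def random_characters(chars_incl, i):
#       return "".join(secrets.choice(punctuation) for _ in range(i))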
def is_strong_password (password : str , min_length : int = 8 ):
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def main ():
    max_length = int(input('''Please indicate the max length of your password: ''' ).strip() )
    chars_incl = input(
        '''Please indicate the characters that must be in your password: ''' ).strip()
    print('''Password generated:''' , password_generator(max_length ) )
    print(
        '''Alternative Password generated:''' , alternative_password_generator(chars_incl , max_length ) , )
    print('''[If you are thinking of using this password, you had better save it.]''' )
if __name__ == "__main__":
main()
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig ( PretrainedConfig ):
'''simple docstring'''
a__ ='''falcon'''
a__ =['''past_key_values''']
def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any:
_UpperCAmelCase : int = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A )
_UpperCAmelCase : int = hidden_size if n_embed is None else n_embed
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[int] = layer_norm_epsilon
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[int] = use_cache
_UpperCAmelCase : Any = hidden_dropout
_UpperCAmelCase : Dict = attention_dropout
_UpperCAmelCase : Any = bos_token_id
_UpperCAmelCase : List[Any] = eos_token_id
_UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase : Dict = alibi
_UpperCAmelCase : Optional[int] = new_decoder_architecture
_UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase : Optional[int] = parallel_attn
_UpperCAmelCase : Optional[int] = bias
super().__init__(bos_token_id=A , eos_token_id=A , **A )
@property
def __lowerCAmelCase ( self ) -> List[str]:
return self.hidden_size // self.num_attention_heads
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return not self.alibi
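# Example usage (a minimal sketch, assuming the upstream transformers
# signature and the public property names head_dim / rotary, which are still
# mangled above):
#
#   config = FalconConfig(vocab_size=65024, hidden_size=4544, num_attention_heads=71)
#   config.head_dim   # 4544 // 71 == 64
#   config.rotary     # True whenever alibi is False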
| 263 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample :
    '''simple docstring'''
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures :
    '''simple docstring'''
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset ( Dataset ):
        '''simple docstring'''
        features: List[InputFeatures]
def __init__( self , A , A , A , A = None , A=False , A = False , ) -> str:
_UpperCAmelCase : Union[str, Any] = hans_processors[task]()
_UpperCAmelCase : List[Any] = os.path.join(
A , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(A ) , A , ) , )
_UpperCAmelCase : List[Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_UpperCAmelCase , _UpperCAmelCase : Tuple = label_list[2], label_list[1]
_UpperCAmelCase : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCAmelCase : str = cached_features_file + '''.lock'''
with FileLock(A ):
if os.path.exists(A ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
_UpperCAmelCase : str = torch.load(A )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
_UpperCAmelCase : Any = (
processor.get_dev_examples(A ) if evaluate else processor.get_train_examples(A )
)
logger.info('''Training examples: %s''' , len(A ) )
_UpperCAmelCase : Optional[Any] = hans_convert_examples_to_features(A , A , A , A )
logger.info('''Saving features into cached file %s''' , A )
torch.save(self.features , A )
def __len__( self ) -> str:
return len(self.features )
def __getitem__( self , A ) -> InputFeatures:
return self.features[i]
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset :
        '''simple docstring'''
        features: List[InputFeatures]
def __init__( self , A , A , A , A = 1_2_8 , A=False , A = False , ) -> List[Any]:
_UpperCAmelCase : List[str] = hans_processors[task]()
_UpperCAmelCase : Tuple = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = label_list[2], label_list[1]
_UpperCAmelCase : List[str] = label_list
_UpperCAmelCase : Any = processor.get_dev_examples(A ) if evaluate else processor.get_train_examples(A )
_UpperCAmelCase : Optional[int] = hans_convert_examples_to_features(A , A , A , A )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(A )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
_UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
A , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def __lowerCAmelCase ( self ) -> List[str]:
return self.dataset
def __len__( self ) -> List[Any]:
return len(self.features )
def __getitem__( self , A ) -> InputFeatures:
return self.features[i]
def __lowerCAmelCase ( self ) -> List[str]:
return self.label_list
class HansProcessor ( DataProcessor ):
    '''simple docstring'''
    def get_train_examples ( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_train_set.txt''' ) ) , '''train''' )
    def get_dev_examples ( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
    def get_labels ( self ):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples ( self , lines , set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '''%s-%s''' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features (examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ):
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='''convert examples to features''' ):
        if ex_index % 1_0000 == 0:
            logger.info('''Writing example %d''' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='''max_length''' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
hans_tasks_num_labels = {
    'hans': 3,
}
hans_processors = {
    'hans': HansProcessor,
}
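# Example wiring (a minimal sketch, assuming the original example's
# HansDataset __init__ signature (data_dir, tokenizer, task, max_seq_length,
# ...), which is still partly mangled above; data_dir must contain the HANS
# heuristics_*.txt files and the tokenizer name is illustrative):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   train_ds = HansDataset("./hans", tokenizer, "hans", max_seq_length=128)
#   print(len(train_ds), train_ds.get_labels())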
| 263 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint (checkpoint_path : str , pytorch_dump_folder_path : str ):
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
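# Usage sketch (paths illustrative): with the official DialoGPT `*_ft.pkl`
# checkpoints downloaded next to this script, running it produces
# ./DialoGPT-{small,medium,large} folders each holding a pytorch_model.bin,
# loadable with GPT2LMHeadModel once a matching config.json is supplied.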
| 263 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def __lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
super().setUp()
a = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
a = {'''unk_token''': '''<unk>'''}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
a = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : str , **__UpperCAmelCase : Any ) ->List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __lowerCAmelCase ( self : str , __UpperCAmelCase : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
a = '''This is a là test'''
a = '''This is a<unk><unk> test'''
return input_text, output_text
def __lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
a = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
a = '''This is a là test'''
a = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
a = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
a = tokens + [tokenizer.unk_token]
a = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
| 0 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
_lowerCAmelCase :Tuple = tuple[int, int]
class Graph :
    '''simple docstring'''
    def __init__( self , vertices , edges ) -> None:
        self.vertices : set[int] = vertices
        self.edges : dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge ( self , edge , weight ) -> None:
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm ( self ) -> Graph:
        subgraph : Graph = Graph({min(self.vertices )} , {} )
        min_edge : EdgeT
        min_weight : int
        edge : EdgeT
        weight : int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
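# Toy example of the class above (edge weights are illustrative): the minimum
# spanning tree keeps edges (0, 1) and (1, 2) with total weight 8, saving
# 18 - 8 = 10 versus the full graph.
#
#   g = Graph({0, 1, 2}, {(0, 1): 5, (1, 2): 3, (0, 2): 10})
#   mst = g.prims_algorithm()
#   saving = sum(g.edges.values()) - sum(mst.edges.values())  # 10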
def solution (filename : str = "p107_network.txt" ) -> int:
    script_dir : str = os.path.abspath(os.path.dirname(__file__ ) )
    filepath : str = os.path.join(script_dir , filename )
    edges : dict[EdgeT, int] = {}
    data : list[str]
    with open(filepath ) as f:
        data = f.read().strip().split('''\n''' )
    adjacency_matrix = [line.split(''',''' ) for line in data]
    for edge1 in range(1 , len(adjacency_matrix ) ):
        for edge2 in range(edge1 ):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2] )
    graph : Graph = Graph(set(range(len(adjacency_matrix ) ) ) , edges )
    subgraph : Graph = graph.prims_algorithm()
    initial_total : int = sum(graph.edges.values() )
    optimal_total : int = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 0 |
'''simple docstring'''
def one_pence() -> int:
    '''simple docstring'''
    return 1
def two_pence(x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence(x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence(x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence(x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence(x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound(x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else one_pound(x - 1_00 ) + fifty_pence(x )
def two_pound(x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pound(x - 2_00 ) + one_pound(x )
def solution(x : int = 2_00 ) -> int:
    '''simple docstring'''
    return two_pound(x )
if __name__ == "__main__":
print(solution(int(input().strip())))
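# Worked example of the recursion above: five_pence(5) counts the ways to pay
# 5p with {1p, 2p, 5p} coins, i.e. 4: 5; 2+2+1; 2+1+1+1; 1+1+1+1+1.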
| 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig ( PretrainedConfig ):
'''simple docstring'''
a__ ='''mgp-str'''
def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]:
super().__init__(**A )
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Dict = max_token_length
_UpperCAmelCase : Optional[Any] = num_character_labels
_UpperCAmelCase : int = num_bpe_labels
_UpperCAmelCase : List[str] = num_wordpiece_labels
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = mlp_ratio
_UpperCAmelCase : List[str] = distilled
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : str = drop_rate
_UpperCAmelCase : List[Any] = qkv_bias
_UpperCAmelCase : List[str] = attn_drop_rate
_UpperCAmelCase : Dict = drop_path_rate
_UpperCAmelCase : Union[str, Any] = output_aa_attentions
_UpperCAmelCase : List[str] = initializer_range
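# Example (a minimal sketch, assuming the upstream transformers signature;
# the values shown mirror the defaults above):
#
#   config = MgpstrConfig(image_size=[32, 128], patch_size=4, max_token_length=27)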
| 263 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester :
'''simple docstring'''
def __init__(self : str , UpperCamelCase : Optional[int] , UpperCamelCase : List[str]=13 , UpperCamelCase : List[str]=7 , UpperCamelCase : Dict=True , UpperCamelCase : Optional[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : List[str]=99 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Optional[int]=36 , UpperCamelCase : Union[str, Any]=6 , UpperCamelCase : Dict=6 , UpperCamelCase : Dict=6 , UpperCamelCase : Any=37 , UpperCamelCase : Tuple="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : Dict=16 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : List[str]=0.02 , UpperCamelCase : Any=3 , UpperCamelCase : Dict=4 , UpperCamelCase : Tuple=None , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = embedding_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_hidden_groups
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def UpperCamelCase__ (self : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = AlbertModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
lowercase__ = model(UpperCamelCase , token_type_ids=UpperCamelCase )
lowercase__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ (self : List[Any] , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = AlbertForPreTraining(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , sentence_order_label=UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def UpperCamelCase__ (self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = AlbertForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = AlbertForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ (self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Dict ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = AlbertForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ (self : str , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = AlbertForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = AlbertForMultipleChoice(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : int = True
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''sentence_order_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp(self ):
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_model_various_embeddings(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding(self ):
        model = AlbertModel.from_pretrained('''albert-base-v2''' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 2 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores ) == 0:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print('''Optimal value : ''' , end='''''' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
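# Editor's note -- a worked trace of the recursion above (not from the original
# source): with scores [90, 23, 6, 33, 21, 65, 123, 34423] and the maximizer
# moving first, the leaf pairs collapse under max to (90, 33, 65, 34423), the
# next level takes min to (33, 65), and the root takes max -- so main() prints
# "Optimal value : 65".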
| 263 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class A ( unittest.TestCase ):
    def setUp(self ) -> None:
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
return TvltImageProcessor.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Tuple = self.get_image_processor()
A : Tuple = self.get_feature_extractor()
A : Any = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
A : List[Any] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : str = self.get_image_processor()
A : Optional[int] = self.get_feature_extractor()
A : int = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = np.ones([12000] )
A : List[Any] = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors='''np''' )
A : Dict = processor(audio=SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Tuple = self.get_image_processor()
A : int = self.get_feature_extractor()
A : int = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
A : int = np.ones([3, 224, 224] )
A : Optional[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors='''np''' )
A : Dict = processor(images=SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = self.get_image_processor()
A : Union[str, Any] = self.get_feature_extractor()
A : List[str] = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
A : Dict = np.ones([12000] )
A : Tuple = np.ones([3, 224, 224] )
A : Optional[Any] = processor(audio=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : List[Any] = self.get_image_processor()
A : str = self.get_feature_extractor()
A : Optional[int] = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 3 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
_UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.float16 )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = generator.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = '''cyberpunk 2077'''
_UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger '''
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.text_to_image(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
_UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 263 | 0 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , albert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 4 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCAmelCase :Any = False
@skip_mps
class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =StableDiffusionAttendAndExcitePipeline
a__ =False
a__ =TEXT_TO_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __lowerCAmelCase ( cls ) -> List[str]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , )
_UpperCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_UpperCAmelCase : List[str] = CLIPTextModel(A )
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : List[str] = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[str] = '''cpu'''
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : Dict = self.get_dummy_inputs(A )
_UpperCAmelCase : Union[str, Any] = pipe(**A ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
_UpperCAmelCase : int = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
_UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> str:
super().test_save_load_local(expected_max_difference=5E-4 )
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = torch.manual_seed(5_1 )
_UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.float16 )
pipe.to('''cuda''' )
_UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses'''
_UpperCAmelCase : int = [5, 7]
_UpperCAmelCase : Dict = pipe(
prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 263 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig ):
    model_type = '''vit'''

    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation(self ) -> float:
        return 1e-4
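# Editor's note -- usage sketch (illustrative, not from the original source):
#   onnx_config = ViTOnnxConfig(ViTConfig())
#   list(onnx_config.inputs)         # -> ["pixel_values"], with dynamic batch/channel/height/width axes
#   onnx_config.atol_for_validation  # -> 1e-4, the tolerance used to validate the exported ONNX graph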
| 5 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : List[str] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 263 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A( a ):
snake_case_ = ['''image_processor''', '''tokenizer''']
snake_case_ = '''ChineseCLIPImageProcessor'''
snake_case_ = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Tuple:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
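        # Editor's note -- usage sketch (checkpoint name assumed, not from the original source):
        #   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
        #   batch = processor(text=["一只猫"], images=image, return_tensors="pt")
        # The result merges the tokenizer outputs (input_ids, attention_mask) with the
        # image processor's pixel_values, as implemented in __call__ above.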
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
__a = self.tokenizer.model_input_names
__a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _snake_case , )
        return self.image_processor_class
| 6 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num : float ):
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(num,) )[0]


def integrand(x : float , z : float ):
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
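# Editor's note -- quick sanity check (illustrative): gamma(5) integrates
# x**4 * e**(-x) over [0, inf), which equals 4! = 24 up to quadrature error.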
| 263 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig ):
    model_type = '''biogpt'''

    def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> Dict:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 7 |
"""simple docstring"""
def binary_or(a : int , b : int ):
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
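# Editor's note -- worked example (not from the original source): binary_or(25, 32)
# turns 25 into "11001" and 32 into "100000", zfill pads them to "011001" and
# "100000", and OR-ing column-wise yields "0b111001", i.e. 57 == 25 | 32.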
| 263 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__A )
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : Any , *_UpperCamelCase : int , **_UpperCamelCase : Optional[Any] ) ->Union[str, Any]:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def snake_case__( self : str , _UpperCamelCase : Dict=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[int]=None , **_UpperCamelCase : List[str] ) ->str:
snake_case_, snake_case_ = {}, {}
if padding is not None:
snake_case_ = padding
if truncation is not None:
snake_case_ = truncation
if top_k is not None:
snake_case_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[Any] , _UpperCamelCase : Union["Image.Image", str] , _UpperCamelCase : str = None , **_UpperCamelCase : Any ) ->Any:
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = {'''image''': image, '''question''': question}
else:
snake_case_ = image
snake_case_ = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple=False , _UpperCamelCase : List[Any]=False ) ->Optional[Any]:
snake_case_ = load_image(inputs['''image'''] )
snake_case_ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
snake_case_ = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def snake_case__( self : Union[str, Any] , _UpperCamelCase : List[Any] ) ->List[Any]:
snake_case_ = self.model(**_UpperCamelCase )
return model_outputs
    def postprocess( self , model_outputs , top_k=5 ) ->Any:
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 8 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 9 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase :List[str] = '▁'
_lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCAmelCase :List[Any] = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
_lowerCAmelCase :Tuple = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
_UpperCAmelCase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
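        # Editor's note -- a concrete trace of the offset (values from the alignment
        # table above): SentencePiece maps "," to id 3, so the token-to-id conversion
        # below returns 3 + self.fairseq_offset = 4, matching fairseq's id for ",".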
def __getstate__( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
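        # Editor's note: a single sequence becomes <s> A </s>; a pair becomes
        # <s> A </s></s> B </s> -- the XLM-R / RoBERTa double-separator format.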
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Any = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , A ) -> int:
_UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 263 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self , value: int ) ->None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self , tree: Node ) ->None:
        self.tree = tree

    def depth_first_search(self , node: Node | None ) ->int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )

    def __iter__(self ) ->Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
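# Editor's note -- usage sketch (names as defined above):
#   root = Node(1); root.left = Node(2); root.right = Node(3)
#   next(iter(BinaryTreeNodeSum(root)))  # -> 6, the sum of every node value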
| 10 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , *A , **A ) -> None:
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 263 | 0 |
import math
import unittest
def is_prime(number : int ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers above 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
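# Editor's note -- why the 6k +/- 1 stride is sound: every integer is congruent
# to one of 0..5 mod 6, and residues 0, 2, 3, 4 are divisible by 2 or 3. Every
# prime above 3 is therefore congruent to 1 or 5 mod 6, which is exactly what
# the stride-6 trial division above tests via i and i + 2.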
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self) -> Optional[Any]:
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(1_1))
self.assertTrue(is_prime(1_3))
self.assertTrue(is_prime(1_7))
self.assertTrue(is_prime(1_9))
self.assertTrue(is_prime(2_3))
self.assertTrue(is_prime(2_9))
def _lowerCamelCase ( self) -> List[Any]:
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 11 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
# Load configuration defined in the metadata file
with open(UpperCamelCase__ ) as metadata_file:
_UpperCAmelCase : Dict = json.load(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# Load the entity vocab file
_UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
_UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
_UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
_UpperCAmelCase : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']]
_UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval()
_UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
_UpperCAmelCase : List[str] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCAmelCase : Dict = (39, 42)
_UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' )
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
_UpperCAmelCase : str = torch.Size((1, 42, 1024) )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) )
_UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCAmelCase : int = torch.Size((1, 1, 1024) )
_UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCAmelCase : List[str] = torch.Size((1, 1, 768) )
_UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Any = {}
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(UpperCamelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' )
_UpperCAmelCase : Tuple = index
return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 263 | 0 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = LxmertTokenizer
UpperCAmelCase__ : Tuple = LxmertTokenizerFast
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : int = True
def lowerCAmelCase__ ( self: str ):
super().setUp()
__lowerCamelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = """UNwant\u00E9d,running"""
__lowerCamelCase = """unwanted, running"""
return input_text, output_text
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.tokenizer_class(self.vocab_file )
__lowerCamelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(UpperCamelCase_ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [7, 4, 5, 10, 8, 9] )
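    # Added index check (not part of the original test): in the setUp vocabulary above,
    # "un" sits at position 7, "##want" at 4, "##ed" at 5, "," at 10, "runn" at 8 and
    # "##ing" at 9, which is exactly the id list [7, 4, 5, 10, 8, 9] asserted here.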
def lowerCAmelCase__ ( self: str ):
if not self.test_rust_tokenizer:
return
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = """I was born in 92000, and this is falsé."""
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
__lowerCamelCase = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = tokenizer.encode(UpperCamelCase_ )
__lowerCamelCase = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
| 12 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match (qs , ks ):
    # Return True when the regexes in `qs` match a window of the key tuple `ks`.
    qts = tuple(re.compile(x + '''$''' ) for x in qs )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules (rules ):
    def replace (key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules ():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''' , None )),
        (("transformer", "wte", "embedding"), P('''mp''' , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , '''mp''' )),
        (("attention", "out_proj", "kernel"), P('''mp''' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , '''mp''' )),
        (("mlp", "c_fc", "bias"), P('''mp''' )),
        (("mlp", "c_proj", "kernel"), P('''mp''' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions (in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
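# Added usage sketch (not part of the original file; the params tree below is a
# made-up minimal example). set_partitions maps every leaf of a Flax params dict
# to a PartitionSpec according to the rules above:
#
#   flat_params = {("transformer", "wte", "embedding"): weight_array}
#   specs = set_partitions(unflatten_dict(flat_params))
#   # -> frozen dict with P('mp', None) at ("transformer", "wte", "embedding")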
| 263 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def save_randomly_initialized_version (config_name: str , save_dir: str , **config_kwargs ):
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeqaSeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
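# Added CLI sketch (script name and paths are illustrative, not from the original file):
#   python save_randomly_initialized.py t5-small ./t5-small-random
# would fetch only the t5-small config and tokenizer, build a randomly initialized
# model from that config, and save both under ./t5-small-random.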
| 13 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vacuum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
] , )
_UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
_UpperCAmelCase : Tuple = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
| 263 | 0 |
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum (number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        raise TypeError('''Parameter number must be int''' )
    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''' )
    # Convert the number to a string to iterate over its digits, summing each digit's factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution (chain_length: int = 60 , number_limit: int = 1_000_000 ) -> int:
    """simple docstring"""
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('''Parameters chain_length and number_limit must be int''' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If the chain contains exactly the desired number of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''')
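# Added worked example (not part of the original solution):
# 145 is a factorion, 1! + 4! + 5! = 1 + 24 + 120 = 145, so its chain has length 1.
# 69 produces the famous five-element chain 69 -> 363600 -> 1454 -> 169 -> 363601,
# after which 1454 repeats and the chain terminates.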
| 14 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def dummy_dataloaders (a=2 , b=3 , batch_size=16 , n_train_batches: int = 10 , n_valid_batches: int = 2 ):
    def get_dataset (n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
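# Added note (not in the original test): each dataloader serves the synthetic
# regression target y = a*x + b + 0.1*noise, so the two-parameter DummyModel
# defined below (x * self.a + self.b) can fit it exactly up to the noise term.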
def train (num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel ( nn.Module ):
    '''simple docstring'''
    def __init__( self ):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
    def forward ( self , x ):
        return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 263 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor ( DeformableDetrImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 15 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :str = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor (shape , vocab_size , rng=None ):
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=jnp.intaa ).reshape(shape )
    return output
def random_attention_mask (shape , rng=None ):
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class __A :
'''simple docstring'''
lowerCAmelCase : str = None
lowerCAmelCase : int = ()
def UpperCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 2
lowercase__ : Optional[Any] = 2
lowercase__ : Tuple = inputs['''input_ids'''].shape[-1] // 2
lowercase__ : Tuple = inputs['''input_ids'''][:max_batch_size, :sequence_length]
lowercase__ : Any = jnp.ones_like(_snake_case )
lowercase__ : Any = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowercase__ : Tuple = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowercase__ : str = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = self._get_input_ids_and_config()
lowercase__ : Dict = False
lowercase__ : List[str] = max_length
lowercase__ : List[str] = 0
for model_class in self.all_generative_model_classes:
lowercase__ : List[str] = model_class(_snake_case )
lowercase__ : Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ : List[str] = getattr(_snake_case ,_snake_case )
lowercase__ : Dict = pt_model_class(_snake_case ).eval()
lowercase__ : Optional[Any] = load_flax_weights_in_pytorch_model(_snake_case ,flax_model.params )
lowercase__ : Tuple = flax_model.generate(_snake_case ).sequences
lowercase__ : List[str] = pt_model.generate(torch.tensor(_snake_case ,dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowercase__ : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
def UpperCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = self._get_input_ids_and_config()
lowercase__ : Optional[Any] = False
lowercase__ : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
lowercase__ : Optional[int] = model_class(_snake_case )
lowercase__ : List[str] = model.generate(_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_snake_case )
lowercase__ : Optional[Any] = jit(model.generate )
lowercase__ : Optional[int] = jit_generate(_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase ( self : str ) -> str:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[Any] = self._get_input_ids_and_config()
lowercase__ : str = True
lowercase__ : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = model.generate(_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_snake_case )
lowercase__ : Tuple = jit(model.generate )
lowercase__ : Optional[Any] = jit_generate(_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Any = self._get_input_ids_and_config()
lowercase__ : str = False
lowercase__ : Optional[int] = max_length
lowercase__ : int = 2
for model_class in self.all_generative_model_classes:
lowercase__ : Dict = model_class(_snake_case )
lowercase__ : Optional[int] = model.generate(_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_snake_case )
lowercase__ : Dict = jit(model.generate )
lowercase__ : Optional[int] = jit_generate(_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = self._get_input_ids_and_config()
lowercase__ : Dict = False
lowercase__ : Tuple = max_length
lowercase__ : Tuple = 2
lowercase__ : List[Any] = 2
for model_class in self.all_generative_model_classes:
lowercase__ : Optional[int] = model_class(_snake_case )
lowercase__ : str = model.generate(_snake_case ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = self._get_input_ids_and_config()
lowercase__ : Any = True
lowercase__ : Optional[Any] = max_length
lowercase__ : int = 0.8
lowercase__ : Optional[Any] = 10
lowercase__ : Union[str, Any] = 0.3
lowercase__ : Dict = 1
lowercase__ : Optional[int] = 8
lowercase__ : Any = 9
for model_class in self.all_generative_model_classes:
lowercase__ : Any = model_class(_snake_case )
lowercase__ : Any = model.generate(_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_snake_case )
lowercase__ : Optional[int] = jit(model.generate )
lowercase__ : Tuple = jit_generate(_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = self._get_input_ids_and_config()
lowercase__ : Union[str, Any] = max_length
lowercase__ : Union[str, Any] = 1
lowercase__ : List[Any] = 8
lowercase__ : int = 9
for model_class in self.all_generative_model_classes:
lowercase__ : List[str] = model_class(_snake_case )
lowercase__ : Optional[int] = model.generate(_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_snake_case )
lowercase__ : List[str] = jit(model.generate )
lowercase__ : Dict = jit_generate(_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = self._get_input_ids_and_config()
lowercase__ : Tuple = max_length
lowercase__ : List[Any] = 2
lowercase__ : Optional[Any] = 1
lowercase__ : Any = 8
lowercase__ : int = 9
for model_class in self.all_generative_model_classes:
lowercase__ : Union[str, Any] = model_class(_snake_case )
lowercase__ : str = model.generate(_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_snake_case )
lowercase__ : int = jit(model.generate )
lowercase__ : Optional[Any] = jit_generate(_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ : Tuple = attention_mask.at[(0, 0)].set(0 )
lowercase__ : Union[str, Any] = False
lowercase__ : int = max_length
for model_class in self.all_generative_model_classes:
lowercase__ : Optional[int] = model_class(_snake_case )
lowercase__ : List[str] = model.generate(_snake_case ,attention_mask=_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_snake_case )
lowercase__ : Any = jit(model.generate )
lowercase__ : List[Any] = jit_generate(_snake_case ,attention_mask=_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ : Any = attention_mask.at[(0, 0)].set(0 )
lowercase__ : Any = True
lowercase__ : List[str] = max_length
for model_class in self.all_generative_model_classes:
lowercase__ : List[str] = model_class(_snake_case )
lowercase__ : Tuple = model.generate(_snake_case ,attention_mask=_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_snake_case )
lowercase__ : Optional[Any] = jit(model.generate )
lowercase__ : Union[str, Any] = jit_generate(_snake_case ,attention_mask=_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ : int = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ : List[Any] = attention_mask.at[(0, 0)].set(0 )
lowercase__ : Optional[int] = 2
lowercase__ : List[str] = max_length
for model_class in self.all_generative_model_classes:
lowercase__ : Any = model_class(_snake_case )
lowercase__ : int = model.generate(_snake_case ,attention_mask=_snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_snake_case )
lowercase__ : List[Any] = jit(model.generate )
lowercase__ : int = jit_generate(_snake_case ,attention_mask=_snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
lowercase__ : str = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowercase__ : Union[str, Any] = '''Hello world'''
lowercase__ : List[Any] = tokenizer(_snake_case ,return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_snake_case ,'''do_samples''' ):
model.generate(_snake_case ,do_samples=_snake_case )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_snake_case ,'''foo''' ):
lowercase__ : int = {'''foo''': '''bar'''}
model.generate(_snake_case ,**_snake_case )
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
"""simple docstring"""
def infix_2_postfix (infix: str ) -> str:
    '''simple docstring'''
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width ), "Postfix".center(print_width ), sep=" | ", )
    print("-" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ), ("".join(stack )).ljust(print_width ), ("".join(post_fix )).ljust(print_width ), sep=" | ", )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            " ".center(8 ), ("".join(stack )).ljust(print_width ), ("".join(post_fix )).ljust(print_width ), sep=" | ", )  # Output in tabular format
    return "".join(post_fix )  # return Postfix as str
def infix_2_prefix (infix: str ) -> str:
    '''simple docstring'''
    infix = list(infix[::-1] )  # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on the reversed Infix, return reverse of its Postfix
if __name__ == "__main__":
Infix = input('\nEnter an Infix Equation = ') # Input an Infix equation
Infix = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
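# Added worked example (not part of the original script): for the infix equation
# a+b*c, infix_2_postfix returns "abc*+" (the * binds tighter than +), and
# infix_2_prefix reverses the paren-swapped input to get "+a*bc".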
| 17 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =IFImgaImgSuperResolutionPipeline
a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
a__ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __lowerCAmelCase ( self ) -> List[str]:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Any = torch.manual_seed(A )
else:
_UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 263 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing (
    search_prob ,
    find_max: bool = True ,
    max_x: float = math.inf ,
    min_x: float = -math.inf ,
    max_y: float = math.inf ,
    min_y: float = -math.inf ,
    visualization: bool = False ,
    start_temperate: float = 1_0_0 ,
    rate_of_decrease: float = 0.01 ,
    threshold_temp: float = 1 ,
):
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # until we find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel("Iterations" )
        plt.ylabel("Function values" )
        plt.show()
    return best_state
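# Added numeric note (not part of the original file): the acceptance probability
# for a worsening move is p = e ** (change / current_temp). For change = -5 this
# gives p ≈ 0.951 at current_temp = 100 but only p ≈ 0.0067 at current_temp = 1,
# so large downhill moves become increasingly unlikely as the system cools.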
if __name__ == "__main__":
def test_fa (x , y ):
    """simple docstring"""
    return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__lowerCamelCase : List[str] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : List[Any] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
__lowerCamelCase : Any = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : int = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
def test_fa (x , y ):
    """simple docstring"""
    return (3 * x**2) - (6 * y)
__lowerCamelCase : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : str = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'''{local_min.score()}'''
)
__lowerCamelCase : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'''{local_min.score()}'''
)
| 18 |
"""simple docstring"""
def count_set_bits (number: int ) -> int:
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
    while number:
        # This way we arrive at the next set bit (the next 1) instead of looping
        # through each bit and checking for 1s; the loop runs once per set bit
        # rather than 32 times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
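# Added trace (not part of the original file): for number = 25 (0b11001) the loop
# body runs exactly three times, 11001 & 11000 -> 11000, 11000 & 10111 -> 10000,
# 10000 & 01111 -> 0, matching the three set bits of 25.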
| 263 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 19 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original (checkpoint_path , config_path , output_path ):
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , '''''' )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , '''''' )] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , scheduler )
    pipeline.save_pretrained(output_path )
if __name__ == "__main__":
_lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
_lowerCAmelCase :List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
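# Added CLI sketch (file name and paths are illustrative, not from the original script):
#   python convert_ldm_uncond.py --checkpoint_path model.ckpt --config_path ldm_config.yaml --output_path ./ldm-pipeline
# loads the OmegaConf config and checkpoint, rebuilds the VQModel/UNetLDMModel/DDIMScheduler,
# and saves the assembled LDMPipeline to --output_path.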
| 263 | 0 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class __snake_case ( lowerCAmelCase ):
_a : torch.FloatTensor
_a : Optional[torch.FloatTensor]= None
def betas_for_alpha_bar (num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.floataa )
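# Added numeric check (not part of the original scheduler): for the "cosine"
# transform, alpha_bar_fn(0) = cos(0.008 / 1.008 * pi / 2) ** 2 ≈ 0.99984, so the
# first beta stays close to 0 and the schedule ramps up smoothly, clipped at
# max_beta = 0.999 near the end of the range.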
class __snake_case ( lowerCAmelCase , lowerCAmelCase ):
_a : Union[str, Any]= 1
@register_to_config
def __init__( self ,snake_case = 1000 ,snake_case = 0.0_001 ,snake_case = 0.02 ,snake_case = "linear" ,snake_case = None ,snake_case = True ,snake_case = True ,snake_case = 0 ,snake_case = "epsilon" ,snake_case = 1.0 ,**snake_case ,):
'''simple docstring'''
if kwargs.get("""set_alpha_to_one""" ,snake_case ) is not None:
lowercase : Any = (
"""The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."""
)
deprecate("""set_alpha_to_one""" ,"""1.0.0""" ,snake_case ,standard_warn=snake_case )
lowercase : List[str] = kwargs["""set_alpha_to_one"""]
if trained_betas is not None:
lowercase : str = torch.tensor(snake_case ,dtype=torch.floataa )
elif beta_schedule == "linear":
lowercase : Union[str, Any] = torch.linspace(snake_case ,snake_case ,snake_case ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase : int = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,snake_case ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase : List[Any] = betas_for_alpha_bar(snake_case )
else:
raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}" )
lowercase : Optional[Any] = 1.0 - self.betas
lowercase : str = torch.cumprod(self.alphas ,dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
lowercase : Optional[int] = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
lowercase : Dict = 1.0
# setable values
lowercase : List[Any] = None
lowercase : Dict = torch.from_numpy(np.arange(0 ,snake_case ).copy().astype(np.intaa ) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ):
'''simple docstring'''
return sample
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ):
'''simple docstring'''
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
f" maximal {self.config.num_train_timesteps} timesteps." )
lowercase : Any = num_inference_steps
lowercase : Any = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase : Union[str, Any] = (np.arange(0 ,snake_case ) * step_ratio).round().copy().astype(np.intaa )
lowercase : Dict = torch.from_numpy(snake_case ).to(snake_case )
self.timesteps += self.config.steps_offset
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case = 0.0 ,snake_case = False ,snake_case = None ,snake_case = True ,):
'''simple docstring'''
lowercase : List[str] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
lowercase : Optional[Any] = self.alphas_cumprod[timestep]
lowercase : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
lowercase : Any = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
lowercase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
lowercase : str = model_output
elif self.config.prediction_type == "sample":
lowercase : str = model_output
lowercase : str = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
lowercase : str = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
lowercase : int = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
""" `v_prediction`""" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
lowercase : int = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase : int = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=snake_case ,pred_original_sample=snake_case )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
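# A minimal standalone sketch (illustrative names, not part of the scheduler
# API above) of the deterministic DDIM update from Eq. (12) of
# https://arxiv.org/pdf/2010.02502.pdf for the "epsilon" prediction type,
# assuming scalar alpha products:
def ddim_step_sketch(model_output, sample, alpha_prod_t, alpha_prod_t_prev):
    beta_prod_t = 1 - alpha_prod_t
    # "predicted x_0"
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    # "direction pointing to x_t"
    pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
    # x_t without the random-noise term
    return alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction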
| 20 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :List[str] = logging.get_logger(__name__)
_lowerCAmelCase :Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''falcon'''
a__ =['''past_key_values''']
def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any:
_UpperCAmelCase : int = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A )
_UpperCAmelCase : int = hidden_size if n_embed is None else n_embed
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[int] = layer_norm_epsilon
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[int] = use_cache
_UpperCAmelCase : Any = hidden_dropout
_UpperCAmelCase : Dict = attention_dropout
_UpperCAmelCase : Any = bos_token_id
_UpperCAmelCase : List[Any] = eos_token_id
_UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase : Dict = alibi
_UpperCAmelCase : Optional[int] = new_decoder_architecture
_UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase : Optional[int] = parallel_attn
_UpperCAmelCase : Optional[int] = bias
super().__init__(bos_token_id=A , eos_token_id=A , **A )
@property
def __lowerCAmelCase ( self ) -> List[str]:
return self.hidden_size // self.num_attention_heads
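# e.g. with the defaults above (hidden_size=4544, num_attention_heads=71) this
# property returns 4544 // 71 = 64, the head size used by falcon-7b.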
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return not self.alibi
| 263 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE : Optional[Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
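# e.g. downscale_height_and_width(512, 512) -> (64, 64); a non-multiple input
# such as 520 rounds up to the next latent block: (520 // 64 + 1) * 8 = 72.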
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowerCamelCase, scheduler=lowerCamelCase, movq=lowerCamelCase, )
_lowercase : List[str] = 2 ** (len(self.movq.config.block_out_channels) - 1)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
if latents is None:
_lowercase : Optional[Any] = randn_tensor(lowerCamelCase, generator=lowerCamelCase, device=lowerCamelCase, dtype=lowerCamelCase)
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
_lowercase : int = latents.to(lowerCamelCase)
_lowercase : int = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase ( self, lowerCamelCase=0) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`')
_lowercase : Tuple = torch.device(F'''cuda:{gpu_id}''')
_lowercase : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase=0) -> int:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
_lowercase : Union[str, Any] = torch.device(F'''cuda:{gpu_id}''')
if self.device.type != "cpu":
self.to('cpu', silence_dtype_warnings=lowerCamelCase)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowercase : Optional[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowercase , _lowercase : List[str] = cpu_offload_with_hook(lowerCamelCase, lowerCamelCase, prev_module_hook=lowerCamelCase)
# We'll offload the last model manually.
_lowercase : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
if not hasattr(self.unet, '_hf_hook'):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase, '_hf_hook')
and hasattr(module._hf_hook, 'execution_device')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase)
def __call__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 5_12, lowerCamelCase = 5_12, lowerCamelCase = 1_00, lowerCamelCase = 4.0, lowerCamelCase = 1, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "pil", lowerCamelCase = True, ) -> List[Any]:
"""simple docstring"""
_lowercase : int = self._execution_device
_lowercase : Any = guidance_scale > 1.0
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : int = torch.cat(lowerCamelCase, dim=0)
_lowercase : Optional[int] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Optional[int] = torch.cat(lowerCamelCase, dim=0)
if do_classifier_free_guidance:
_lowercase : str = image_embeds.repeat_interleave(lowerCamelCase, dim=0)
_lowercase : int = negative_image_embeds.repeat_interleave(lowerCamelCase, dim=0)
_lowercase : List[str] = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=lowerCamelCase)
self.scheduler.set_timesteps(lowerCamelCase, device=lowerCamelCase)
_lowercase : Optional[Any] = self.scheduler.timesteps
_lowercase : List[str] = self.unet.config.in_channels
_lowercase , _lowercase : Union[str, Any] = downscale_height_and_width(lowerCamelCase, lowerCamelCase, self.movq_scale_factor)
# create initial latent
_lowercase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width), image_embeds.dtype, lowerCamelCase, lowerCamelCase, lowerCamelCase, self.scheduler, )
for i, t in enumerate(self.progress_bar(lowerCamelCase)):
# expand the latents if we are doing classifier free guidance
_lowercase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowercase : Tuple = {'image_embeds': image_embeds}
_lowercase : Optional[Any] = self.unet(
sample=lowerCamelCase, timestep=lowerCamelCase, encoder_hidden_states=lowerCamelCase, added_cond_kwargs=lowerCamelCase, return_dict=lowerCamelCase, )[0]
if do_classifier_free_guidance:
_lowercase , _lowercase : List[str] = noise_pred.split(latents.shape[1], dim=1)
_lowercase , _lowercase : Tuple = noise_pred.chunk(2)
_lowercase , _lowercase : Any = variance_pred.chunk(2)
_lowercase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowercase : Tuple = torch.cat([noise_pred, variance_pred_text], dim=1)
if not (
hasattr(self.scheduler.config, 'variance_type')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowercase , _lowercase : int = noise_pred.split(latents.shape[1], dim=1)
# compute the previous noisy sample x_t -> x_t-1
_lowercase : str = self.scheduler.step(
lowerCamelCase, lowerCamelCase, lowerCamelCase, generator=lowerCamelCase, )[0]
# post-processing
_lowercase : str = self.movq.decode(lowerCamelCase, force_not_quantize=lowerCamelCase)['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''')
if output_type in ["np", "pil"]:
_lowercase : Dict = image * 0.5 + 0.5
_lowercase : Any = image.clamp(0, 1)
_lowercase : List[str] = image.cpu().permute(0, 2, 3, 1).float().numpy()
if output_type == "pil":
_lowercase : Optional[Any] = self.numpy_to_pil(lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase)
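# Standalone sketch (illustrative name, not part of the pipeline API) of the
# classifier-free guidance blend used in the denoising loop above: the
# prediction moves from the unconditional output toward the text-conditioned
# one as guidance_scale grows.
def cfg_blend_sketch(noise_pred_uncond, noise_pred_text, guidance_scale):
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)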
| 21 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    # move the LM head weight from its old DialoGPT key to the name transformers expects
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(checkpoint_path, pytorch_dump_folder_path)
| 263 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Tuple = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : List[str] = """sew-d"""
def __init__( self : Optional[Any] , snake_case_ : Tuple=3_2 , snake_case_ : Optional[Any]=7_6_8 , snake_case_ : Tuple=1_2 , snake_case_ : Union[str, Any]=1_2 , snake_case_ : Tuple=3_0_7_2 , snake_case_ : Tuple=2 , snake_case_ : int=5_1_2 , snake_case_ : Optional[int]=2_5_6 , snake_case_ : Union[str, Any]=True , snake_case_ : Any=True , snake_case_ : str=("p2c", "c2p") , snake_case_ : Dict="layer_norm" , snake_case_ : str="gelu_python" , snake_case_ : Dict=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : Tuple=0.1 , snake_case_ : Any=0.0 , snake_case_ : Tuple=0.1 , snake_case_ : Union[str, Any]=0.0_2 , snake_case_ : str=1e-7 , snake_case_ : Optional[Any]=1e-5 , snake_case_ : Optional[Any]="group" , snake_case_ : Tuple="gelu" , snake_case_ : Tuple=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_ : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case_ : Dict=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case_ : int=False , snake_case_ : Union[str, Any]=1_2_8 , snake_case_ : int=1_6 , snake_case_ : Any=True , snake_case_ : Tuple=0.0_5 , snake_case_ : Tuple=1_0 , snake_case_ : Dict=2 , snake_case_ : Tuple=0.0 , snake_case_ : List[Any]=1_0 , snake_case_ : Union[str, Any]=0 , snake_case_ : Any="mean" , snake_case_ : Optional[Any]=False , snake_case_ : Any=False , snake_case_ : Tuple=2_5_6 , snake_case_ : int=0 , snake_case_ : Optional[Any]=1 , snake_case_ : List[str]=2 , **snake_case_ : List[str] , ):
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = feat_extract_norm
_UpperCAmelCase = feat_extract_activation
_UpperCAmelCase = list(snake_case_ )
_UpperCAmelCase = list(snake_case_ )
_UpperCAmelCase = list(snake_case_ )
_UpperCAmelCase = conv_bias
_UpperCAmelCase = num_conv_pos_embeddings
_UpperCAmelCase = num_conv_pos_embedding_groups
_UpperCAmelCase = len(self.conv_dim )
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = squeeze_factor
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = position_buckets
_UpperCAmelCase = share_att_key
_UpperCAmelCase = relative_attention
_UpperCAmelCase = norm_rel_ebd
_UpperCAmelCase = list(snake_case_ )
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = feat_proj_dropout
_UpperCAmelCase = final_dropout
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = feature_layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. "
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
f' = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase = apply_spec_augment
_UpperCAmelCase = mask_time_prob
_UpperCAmelCase = mask_time_length
_UpperCAmelCase = mask_time_min_masks
_UpperCAmelCase = mask_feature_prob
_UpperCAmelCase = mask_feature_length
_UpperCAmelCase = mask_feature_min_masks
# ctc loss
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# sequence classification
_UpperCAmelCase = use_weighted_layer_sum
_UpperCAmelCase = classifier_proj_size
@property
def lowercase ( self : Any ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
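# With the default conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) this
# property evaluates to 5 * 2**6 = 320 input samples per output frame.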
| 22 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
_lowerCAmelCase :Tuple = tuple[int, int]
class Graph:
    '''simple docstring'''

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            # pick the cheapest edge crossing the cut between subgraph and the rest
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
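# Usage sketch (toy graph, values illustrative):
# graph = Graph({1, 2, 3}, {(1, 2): 1, (2, 3): 2, (1, 3): 3})
# mst = graph.prims_algorithm()  # keeps edges (1, 2) and (2, 3); total weight 3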
def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ):
_UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) )
_UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : dict[EdgeT, int] = {}
_UpperCAmelCase : list[str]
_UpperCAmelCase : int
_UpperCAmelCase : int
with open(UpperCamelCase__ ) as f:
_UpperCAmelCase : str = f.read().strip().split('''\n''' )
_UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(UpperCamelCase__ ) ):
for edgea in range(UpperCamelCase__ ):
if adjaceny_matrix[edgea][edgea] != "-":
_UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] )
_UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ )
_UpperCAmelCase : Graph = graph.prims_algorithm()
_UpperCAmelCase : int = sum(graph.edges.values() )
_UpperCAmelCase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase__: int = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self : int , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PIL.Image.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : Union[int, float] = 1 / 255 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , **__snake_case : Any , ) -> None:
super().__init__(**__snake_case )
UpperCAmelCase : Any = size if size is not None else {'''height''': 256, '''width''': 256}
UpperCAmelCase : int = get_size_dict(__snake_case )
UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase : str = get_size_dict(__snake_case , param_name='''crop_size''' )
UpperCAmelCase : Dict = do_resize
UpperCAmelCase : Any = size
UpperCAmelCase : Optional[int] = resample
UpperCAmelCase : List[str] = do_center_crop
UpperCAmelCase : Optional[int] = crop_size
UpperCAmelCase : Any = do_rescale
UpperCAmelCase : int = rescale_factor
UpperCAmelCase : Dict = do_normalize
UpperCAmelCase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self : Any , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PIL.Image.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[int] , ) -> np.ndarray:
UpperCAmelCase : Dict = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
__snake_case , size=(size['''height'''], size['''width''']) , resample=__snake_case , data_format=__snake_case , **__snake_case )
def A ( self : Any , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : int , ) -> np.ndarray:
UpperCAmelCase : Optional[int] = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(__snake_case , size=(size['''height'''], size['''width''']) , data_format=__snake_case , **__snake_case )
def A ( self : List[Any] , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[int] , ) -> List[Any]:
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def A ( self : Optional[int] , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Any , ) -> np.ndarray:
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def A ( self : Union[str, Any] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : Tuple=None , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : List[Any] , ) -> PIL.Image.Image:
UpperCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : Any = resample if resample is not None else self.resample
UpperCAmelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase : Any = image_std if image_std is not None else self.image_std
UpperCAmelCase : str = size if size is not None else self.size
UpperCAmelCase : Tuple = get_size_dict(__snake_case )
UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase : Optional[Any] = get_size_dict(__snake_case , param_name='''crop_size''' )
UpperCAmelCase : Any = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase : Any = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
UpperCAmelCase : Optional[int] = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_center_crop:
UpperCAmelCase : List[Any] = [self.center_crop(image=__snake_case , size=__snake_case ) for image in images]
if do_rescale:
UpperCAmelCase : Tuple = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
UpperCAmelCase : Any = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
UpperCAmelCase : Optional[int] = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
UpperCAmelCase : List[Any] = {'''pixel_values''': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
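# Hypothetical usage sketch (assumes the last method above plays the standard
# `preprocess` role, so the processor is callable): with the defaults, an input
# image is resized to 256x256, center-cropped to 224x224, rescaled by 1/255 and
# normalized, giving a "pixel_values" array of shape (1, 3, 224, 224).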
| 23 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''mgp-str'''
def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]:
super().__init__(**A )
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Dict = max_token_length
_UpperCAmelCase : Optional[Any] = num_character_labels
_UpperCAmelCase : int = num_bpe_labels
_UpperCAmelCase : List[str] = num_wordpiece_labels
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = mlp_ratio
_UpperCAmelCase : List[str] = distilled
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : str = drop_rate
_UpperCAmelCase : List[Any] = qkv_bias
_UpperCAmelCase : List[str] = attn_drop_rate
_UpperCAmelCase : Dict = drop_path_rate
_UpperCAmelCase : Union[str, Any] = output_a3_attentions
_UpperCAmelCase : List[str] = initializer_range
| 263 | 0 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance < 0:
        raise ValueError('Resistance cannot be negative')
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError('Exactly one argument must be 0')
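# Usage examples:
# ohms_law(voltage=10, current=5, resistance=0) -> {"resistance": 2.0}
# ohms_law(voltage=0, current=2, resistance=3) -> {"voltage": 6.0}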
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
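# Worked example: for scores [90, 23, 6, 33, 21, 65, 123, 34423] the tree height
# is log2(8) = 3 and minimax(0, 0, True, scores, 3) returns 65: the depth-2
# maxima are (90, 33, 65, 34423), the depth-1 minima are (33, 65), and the
# maximizing root picks 65.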
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : str = '''gptj'''
__UpperCamelCase : Optional[int] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__(self , SCREAMING_SNAKE_CASE__=5_04_00 , SCREAMING_SNAKE_CASE__=20_48 , SCREAMING_SNAKE_CASE__=40_96 , SCREAMING_SNAKE_CASE__=28 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=64 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="gelu_new" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=5_02_56 , SCREAMING_SNAKE_CASE__=5_02_56 , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = n_positions
SCREAMING_SNAKE_CASE__ : Optional[Any] = n_embd
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_layer
SCREAMING_SNAKE_CASE__ : Optional[Any] = n_head
SCREAMING_SNAKE_CASE__ : Any = n_inner
SCREAMING_SNAKE_CASE__ : str = rotary_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = activation_function
SCREAMING_SNAKE_CASE__ : Tuple = resid_pdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = embd_pdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = attn_pdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = use_cache
SCREAMING_SNAKE_CASE__ : Dict = bos_token_id
SCREAMING_SNAKE_CASE__ : str = eos_token_id
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "default" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , ) -> str:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE__ , task=SCREAMING_SNAKE_CASE__ , patching_specs=SCREAMING_SNAKE_CASE__ , use_past=SCREAMING_SNAKE_CASE__ )
if not getattr(self._config , """pad_token_id""" , SCREAMING_SNAKE_CASE__ ):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE__ : List[str] = 0
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction="""inputs""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self._config.n_head
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = super(SCREAMING_SNAKE_CASE__ , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
# We need to order the input in the way they appears in the forward()
SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ : Optional[Any] = seqlen + 2
SCREAMING_SNAKE_CASE__ : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE__ : str = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE__ : List[Any] = common_inputs["""attention_mask"""]
if self.use_past:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )] , dim=1 )
return ordered_inputs
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return 13
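# Illustrative shape check (assumed batch=2, seq=8, use_past=True): with the
# defaults above (n_embd=4096, n_head=16), each dummy past key/value tensor has
# shape (2, 16, 8 + 2, 4096 // 16) = (2, 16, 10, 256), and the attention mask
# is extended by the past length along dim 1.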
| 25 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
_UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.float16 )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = generator.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = '''cyberpunk 2077'''
_UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger '''
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.text_to_image(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
_UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 263 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = torch.device("cpu")
def lowerCAmelCase_ ( ):
_A : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_A : Optional[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
return im
def lowerCAmelCase_ ( snake_case_ ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] )
def rename_key(dct, old_key, new_key):
    # move the tensor stored under old_key to new_key, in place
    dct[new_key] = dct.pop(old_key)
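# e.g. rename_key(sd, "head.weight", "classifier.weight") re-files the weight
# under the new name (key names here are illustrative).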
def lowerCAmelCase_ ( snake_case_ ):
_A : int = []
for k in state_dict.keys():
_A : Optional[Any] = k
if ".pwconv" in k:
_A : int = k_new.replace(""".pwconv""",""".point_wise_conv""" )
if ".dwconv" in k:
_A : Dict = k_new.replace(""".dwconv""",""".depth_wise_conv""" )
if ".Proj." in k:
_A : str = k_new.replace(""".Proj.""",""".proj.""" )
if "patch_embed" in k_new:
_A : Optional[Any] = k_new.replace("""patch_embed""","""swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
_A : Any = k_new.split(""".""" )
if ls[2].isdigit():
_A : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
_A : Optional[Any] = k_new.replace("""network""","""swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
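# e.g. an illustrative key "network.1.2.mlp.fc1.weight" is mapped by the digit
# branch above to "swiftformer.encoder.network.1.blocks.2.mlp.fc1.weight".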
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : List[str] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
_A : str = 1000
_A : List[str] = """huggingface/label-files"""
_A : Union[str, Any] = """imagenet-1k-id2label.json"""
_A : Tuple = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) )
_A : int = {int(snake_case_ ): v for k, v in idalabel.items()}
_A : Any = idalabel
_A : List[str] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
_A : List[Any] = [3, 3, 6, 4]
_A : List[Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
_A : Dict = [3, 3, 9, 6]
_A : Union[str, Any] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
_A : Optional[Any] = [4, 3, 10, 5]
_A : Optional[Any] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
_A : Tuple = [4, 4, 12, 6]
_A : List[Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
_A : Optional[Any] = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""",check_hash=snake_case_ )
else:
_A : List[Any] = torch.load(snake_case_,map_location="""cpu""" )
_A : Dict = checkpoint
_A : Dict = create_rename_keys(snake_case_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case_,snake_case_,snake_case_ )
# load HuggingFace model
_A : str = SwiftFormerForImageClassification(snake_case_ ).eval()
hf_model.load_state_dict(snake_case_ )
# prepare test inputs
_A : Any = prepare_img()
_A : Optional[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
_A : Any = processor(images=snake_case_,return_tensors="""pt""" )
# compare outputs from both models
_A : Union[str, Any] = get_expected_output(snake_case_ )
_A : List[Any] = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5],snake_case_,atol=1e-3 )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
_snake_case = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 26 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCAmelCase :Any = False
@skip_mps
class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =StableDiffusionAttendAndExcitePipeline
a__ =False
a__ =TEXT_TO_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __lowerCAmelCase ( cls ) -> List[str]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , )
_UpperCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_UpperCAmelCase : List[str] = CLIPTextModel(A )
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : List[str] = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[str] = '''cpu'''
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : Dict = self.get_dummy_inputs(A )
_UpperCAmelCase : Union[str, Any] = pipe(**A ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
_UpperCAmelCase : int = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
_UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> str:
super().test_save_load_local(expected_max_difference=5E-4 )
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = torch.manual_seed(5_1 )
_UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.float16 )
pipe.to('''cuda''' )
_UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses'''
_UpperCAmelCase : int = [5, 7]
_UpperCAmelCase : Dict = pipe(
prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 263 | 0 |
'''simple docstring'''
import cva
import numpy as np
class __UpperCamelCase :
def __init__( self , __a , __a ):
'''simple docstring'''
if k in (0.04, 0.06):
__a : Union[str, Any] = k
__a : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__( self ):
'''simple docstring'''
return str(self.k )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[str] = cva.imread(__a , 0 )
__a , __a : Union[str, Any] = img.shape
__a : list[list[int]] = []
__a : Optional[Any] = img.copy()
__a : List[Any] = cva.cvtColor(__a , cva.COLOR_GRAY2RGB )
__a , __a : List[str] = np.gradient(__a )
__a : Tuple = dx**2
__a : str = dy**2
__a : Union[str, Any] = dx * dy
__a : str = 0.04
__a : Dict = self.window_size // 2
for y in range(__a , h - offset ):
for x in range(__a , w - offset ):
__a : Any = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__a : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__a : int = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__a : List[str] = (wxx * wyy) - (wxy**2)
__a : str = wxx + wyy
__a : str = det - k * (trace**2)
# Response threshold; can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
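# Recap of the response computed above: for the windowed structure tensor
# M = [[w_xx, w_xy], [w_xy, w_yy]], the Harris score is
# R = det(M) - k * trace(M)**2; pixels with R above the threshold are corners.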
if __name__ == "__main__":
__lowercase : Tuple = HarrisCorner(0.04, 3)
__lowercase , __lowercase : int = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 27 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : List[str] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 263 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
_lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
torch.set_grad_enabled(False)
_lowerCamelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """simple docstring"""
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]
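# e.g. split_text("a b c d e", n=2) -> ["a b", "c d", "e"]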
def __lowerCamelCase ( A__ ) -> dict:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(A__ ):
titles.append(title if title is not None else '' )
texts.append(A__ )
return {"title": titles, "text": texts}
def __lowerCamelCase ( A__ , A__ , A__ ) -> dict:
"""simple docstring"""
UpperCamelCase = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=A__ , padding='longest' , return_tensors='pt' )['input_ids']
UpperCamelCase = ctx_encoder(input_ids.to(device=A__ ) , return_dict=A__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __lowerCamelCase ( A__ , A__ , A__ , ) -> Optional[int]:
"""simple docstring"""
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCamelCase = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCamelCase = dataset.map(A__ , batched=A__ , num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCamelCase = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=A__ )
UpperCamelCase = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCamelCase = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
UpperCamelCase = dataset.map(
partial(A__ , ctx_encoder=A__ , ctx_tokenizer=A__ ) , batched=A__ , batch_size=processing_args.batch_size , features=A__ , )
# And finally save your dataset
UpperCamelCase = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(A__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCamelCase = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=A__ )
# And save the index
UpperCamelCase = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(A__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
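# A hedged sketch of querying the saved artifacts later; the question-encoder
# classes below exist in transformers, but this snippet is an assumption, not
# part of this script:
#   from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#   q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   q_tok = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   q_emb = q_enc(**q_tok("your question", return_tensors="pt")).pooler_output.detach().numpy()
#   scores, passages = dataset.get_nearest_examples("embeddings", q_emb[0], k=5)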
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
_SCREAMING_SNAKE_CASE = field(
default=str(Path(_a ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , )
_SCREAMING_SNAKE_CASE = field(
default=_a , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , )
_SCREAMING_SNAKE_CASE = field(
default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , )
_SCREAMING_SNAKE_CASE = field(
default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={
"""help""": (
"""The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
""" 'facebook/dpr-ctx_encoder-multiset-base'"""
)
} , )
_SCREAMING_SNAKE_CASE = field(
default=str(Path(_a ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
_SCREAMING_SNAKE_CASE = field(
default=_a , metadata={
"""help""": """The number of processes to use to split the documents into passages. Default is single process."""
} , )
_SCREAMING_SNAKE_CASE = field(
default=16 , metadata={
"""help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
} , )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
_SCREAMING_SNAKE_CASE = field(
default=768 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , )
_SCREAMING_SNAKE_CASE = field(
default=128 , metadata={
"""help""": (
"""The number of bi-directional links created for every new element during the HNSW index construction."""
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
_lowerCamelCase : Any = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Optional[Any] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 28 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def lowerCamelCase_ (UpperCamelCase__ : float ):
if num <= 0:
raise ValueError('''math domain error''' )
return quad(UpperCamelCase__ , 0 , UpperCamelCase__ , args=(UpperCamelCase__) )[0]
def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ):
return math.pow(UpperCamelCase__ , z - 1 ) * math.exp(-x )
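# Quick sanity check (illustrative, not from the original file): Gamma(n) equals
# (n - 1)! for positive integers, so evaluating the gamma function above at
# num=5 should return approximately 24.0.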
if __name__ == "__main__":
from doctest import testmod
testmod()
| 263 | 0 |
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
while b:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = b, a % b
return a
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(__snake_case , a % b )
def lowercase__ ( ):
'''simple docstring'''
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 29 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase : List[str] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b"
_UpperCAmelCase : str = str(bin(UpperCamelCase__ ) )[2:]
_UpperCAmelCase : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) )
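# Worked example (illustrative values): for a=25 (0b11001) and b=32 (0b100000),
# both strings are zero-filled to 6 digits and OR-ed column by column:
#   011001 | 100000 -> 111001, so the function returns "0b111001".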
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Any ) -> Union[str, Any]:
debug_launcher(test_script.main )
def _lowercase ( self : List[Any] ) -> Tuple:
debug_launcher(test_ops.main )
| 30 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase :int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Optional[int] , A : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
super().__init__()
_UpperCAmelCase : Optional[int] = nn.ModuleList(A )
def _A ( self : Dict , A : torch.FloatTensor , A : Union[torch.Tensor, float, int] , A : torch.Tensor , A : List[torch.tensor] , A : List[float] , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[Dict[str, Any]] = None , A : bool = False , A : bool = True , ):
for i, (image, scale, controlnet) in enumerate(zip(A , A , self.nets ) ):
_UpperCAmelCase , _UpperCAmelCase : str = controlnet(
A , A , A , A , A , A , A , A , A , A , A , )
# merge samples
if i == 0:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = down_samples, mid_sample
else:
_UpperCAmelCase : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A , A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def _A ( self : List[str] , A : Union[str, os.PathLike] , A : bool = True , A : Callable = None , A : bool = False , A : Optional[str] = None , ):
_UpperCAmelCase : str = 0
_UpperCAmelCase : str = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A , is_main_process=A , save_function=A , safe_serialization=A , variant=A , )
idx += 1
_UpperCAmelCase : Tuple = model_path_to_save + F"""_{idx}"""
@classmethod
def _A ( cls : int , A : Optional[Union[str, os.PathLike]] , **A : Tuple ):
_UpperCAmelCase : str = 0
_UpperCAmelCase : int = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_UpperCAmelCase : int = pretrained_model_path
while os.path.isdir(A ):
_UpperCAmelCase : List[str] = ControlNetModel.from_pretrained(A , **A )
controlnets.append(A )
idx += 1
_UpperCAmelCase : Dict = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(A )} controlnets loaded from {pretrained_model_path}.""" )
if len(A ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(A )
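# A minimal usage sketch for the class above (MultiControlNetModel in the
# upstream diffusers source); the variable names and paths are illustrative:
#   multi = MultiControlNetModel([controlnet_a, controlnet_b])
#   multi.save_pretrained("./mydirectory/controlnet")  # writes ./mydirectory/controlnet and ./mydirectory/controlnet_1
#   multi = MultiControlNetModel.from_pretrained("./mydirectory/controlnet")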
| 31 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase :List[str] = '▁'
_lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCAmelCase :List[Any] = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
_lowerCAmelCase :Tuple = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =PRETRAINED_VOCAB_FILES_MAP
a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =['''input_ids''', '''attention_mask''']
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None:
# Mask token behaves like a normal word, i.e. includes the space before it
_UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
_UpperCAmelCase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
_UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
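# Worked example of the alignment above: the spm model gives "," piece id 3, so
# its fairseq-compatible id becomes 3 + fairseq_offset = 4, matching the table;
# "<mask>" is appended at the very end, at id len(sp_model) + fairseq_offset.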
def __getstate__( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
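# For reference, the formats built above are the standard XLM-R ones:
#   single sequence:    <s> A </s>
#   pair of sequences:  <s> A </s></s> B </s>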
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Any = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , A ) -> int:
_UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 263 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
UpperCAmelCase_ : Any = namedtuple('covid_data', 'cases deaths recovered')
def SCREAMING_SNAKE_CASE_ ( __A : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
"""simple docstring"""
a_ : Tuple = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(__A ).content ).xpath(__A ) )
UpperCAmelCase_ : Optional[int] = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 32 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , *A , **A ) -> None:
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 263 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowercase ( ):
lowercase_ : Union[str, Any] = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 2_0, '''a ''' * 3_0, '''b ''' * 7],
}
lowercase_ : List[Any] = Dataset.from_dict(__snake_case )
return dataset
class _UpperCAmelCase ( _A ):
def A ( self : Any ) -> Optional[Any]:
lowercase_ : Optional[Any] = get_dataset()
lowercase_ : int = make_duplicate_clusters(A , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def A ( self : Optional[int] ) -> int:
lowercase_ : List[Any] = get_dataset()
lowercase_ , lowercase_ : Optional[Any] = deduplicate_dataset(A )
self.assertEqual(len(A ) , 2 )
print(A )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , A )
| 33 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
# Load configuration defined in the metadata file
with open(UpperCamelCase__ ) as metadata_file:
_UpperCAmelCase : Dict = json.load(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# Load the entity vocab file
_UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
_UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
_UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
_UpperCAmelCase : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']]
_UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval()
_UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
_UpperCAmelCase : List[str] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCAmelCase : Dict = (39, 42)
_UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' )
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
_UpperCAmelCase : str = torch.Size((1, 42, 1024) )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) )
_UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCAmelCase : int = torch.Size((1, 1, 1024) )
_UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCAmelCase : List[str] = torch.Size((1, 1, 768) )
_UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Any = {}
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(UpperCamelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' )
_UpperCAmelCase : Tuple = index
return entity_vocab
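# The entity vocabulary parsed above is a plain text file with one tab-separated
# entry per line; the id stored for each entity is simply its line number.
# Illustrative rows (hypothetical contents):
#   [MASK]<TAB>...
#   Ana Ivanovic<TAB>...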
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowerCAmelCase :Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 263 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def snake_case_ (_a : str ):
UpperCAmelCase = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def snake_case_ (_a : Union[str, Any] , _a : str ):
UpperCAmelCase = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def snake_case_ (_a : Union[str, Any] ):
UpperCAmelCase = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", '''stage2.cls_token''') )
return token
def snake_case_ ():
UpperCAmelCase = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def snake_case_ (_a : Optional[Any] , _a : str , _a : Dict , _a : Optional[Any] ):
UpperCAmelCase = '''imagenet-1k-id2label.json'''
UpperCAmelCase = 1_0_0_0
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = num_labels
UpperCAmelCase = json.load(open(cached_download(hf_hub_url(_a , _a , repo_type='''dataset''' ) ) , '''r''' ) )
UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
UpperCAmelCase = UpperCAmelCase = CvtConfig(num_labels=_a , idalabel=_a , labelaid=_a )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
UpperCAmelCase = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
UpperCAmelCase = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
UpperCAmelCase = [2, 2, 2_0]
UpperCAmelCase = [3, 1_2, 1_6]
UpperCAmelCase = [1_9_2, 7_6_8, 1_0_2_4]
UpperCAmelCase = CvtForImageClassification(_a )
UpperCAmelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
UpperCAmelCase = image_size
UpperCAmelCase = torch.load(_a , map_location=torch.device('''cpu''' ) )
UpperCAmelCase = OrderedDict()
UpperCAmelCase = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
UpperCAmelCase = list_of_state_dict + cls_token(_a )
UpperCAmelCase = list_of_state_dict + embeddings(_a )
for cnt in range(config.depth[idx] ):
UpperCAmelCase = list_of_state_dict + attention(_a , _a )
UpperCAmelCase = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_a )
for i in range(len(_a ) ):
UpperCAmelCase = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_a )
model.save_pretrained(_a )
image_processor.save_pretrained(_a )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
A =argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=3_84,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Path to the CvT checkpoint (.pth) file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
A =parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 34 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowerCAmelCase :str = object()
# For specifying empty leaf dict `{}`
_lowerCAmelCase :str = object()
def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ):
_UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
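# To make the matching concrete (the parameter path is hypothetical): a key like
# ("transformer", "h", "0", "attention", "q_proj", "kernel") satisfies the rule
# ("attention", "(q_proj|k_proj|v_proj)", "kernel") because _match slides a
# window of the rule's length over the key tuple and requires every regex to
# fully match its component; the first matching rule's PartitionSpec replaces
# the leaf's placeholder.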
def lowerCamelCase_ ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowerCamelCase_ (UpperCamelCase__ : str ):
_UpperCAmelCase : List[str] = _get_partition_rules()
_UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
| 263 | 0 |
'''simple docstring'''
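# Background for the search below (derivation kept as a comment): with the
# decreasing arithmetic progression x = a + 2d, y = a + d, z = a (all positive),
# n = x**2 - y**2 - z**2 factors as (d + a) * (3d - a). Writing first_term = d + a,
# the paired divisor is n / first_term = 3d - a, so 4d = first_term + n / first_term;
# hence the divisible-by-4 test, and positivity of z and n forces d < first_term < 4d.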
def __snake_case( _lowerCAmelCase = 1_000_000 ) -> int:
snake_case__ : int = limit + 1
snake_case__ : Dict = [0] * limit
for first_term in range(1 , _lowerCAmelCase ):
for n in range(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
snake_case__ : str = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z > 0 and a > d; also a < 4d
snake_case__ : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F"{solution() = }")
| 35 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio clip of a dog
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
] , )
_UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
_UpperCAmelCase : Tuple = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
| 263 | 0 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=4, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : Any = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[str] = use_attention_mask
_lowerCAmelCase : str = use_token_type_ids
_lowerCAmelCase : List[str] = use_labels
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : int = max_position_embeddings
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Dict = num_choices
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = None
if self.use_attention_mask:
_lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : Union[str, Any] = DistilBertConfig(
vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=__a, )
return config, input_ids, attention_mask
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = config_and_inputs
_lowerCAmelCase : Tuple = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = FlaxDistilBertModelTester(self)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class_name.from_pretrained("distilbert-base-uncased")
_lowerCAmelCase : str = model(np.ones((1, 1)))
self.assertIsNotNone(__a)
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
_lowerCAmelCase : str = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
_lowerCAmelCase : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
_lowerCAmelCase : Dict = model(__a, attention_mask=__a)[0]
_lowerCAmelCase : List[Any] = (1, 11, 768)
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Optional[Any] = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], __a, atol=1E-4))
| 36 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
def lowerCamelCase_ (UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 2 ):
def get_dataset(UpperCamelCase__ : List[str] ):
_UpperCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(UpperCamelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
_UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=None ):
_UpperCAmelCase : Tuple = []
for epoch in range(UpperCamelCase__ ):
# Train quickly
model.train()
for batch in dataloader:
_UpperCAmelCase , _UpperCAmelCase : Dict = batch
_UpperCAmelCase : int = model(UpperCamelCase__ )
_UpperCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , UpperCamelCase__ )
accelerator.backward(UpperCamelCase__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> List[Any]:
super().__init__()
_UpperCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) )
_UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) )
def __lowerCAmelCase ( self , A ) -> Tuple:
return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Optional[Any] = DummyModel()
_UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
_UpperCAmelCase : Optional[int] = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(
A , A , A , A )
# Save initial
_UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' )
accelerator.save_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
_UpperCAmelCase : Tuple = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : List[Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : Dict = DummyModel()
_UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
_UpperCAmelCase : Tuple = Accelerator()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A )
accelerator.load_state(A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : List[str] = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A )
# Save everything
_UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' )
accelerator.save_state(A )
# Load everything back in and make sure all states work
accelerator.load_state(A )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(
A , A , A , A )
# Save initial
accelerator.save_state()
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Dict = optimizer.state_dict()
_UpperCAmelCase : int = train(3 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_UpperCAmelCase : List[Any] = DummyModel()
_UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A )
_UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
A , A , A , A )
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item()
_UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
_UpperCAmelCase : List[str] = train(2 , A , A , A , A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , A , A , A , A )
((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
_UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] )
_UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] )
_UpperCAmelCase : Optional[int] = DummyModel()
_UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() )
_UpperCAmelCase : Optional[int] = Accelerator()
with self.assertRaises(A ) as ve:
accelerator.register_for_checkpointing(A , A , A , A )
_UpperCAmelCase : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : Tuple = DummyModel()
_UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 )
_UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders()
_UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A )
# Train baseline
_UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare(
A , A , A , A , A )
# Save initial
accelerator.save_state()
_UpperCAmelCase : List[str] = scheduler.state_dict()
train(3 , A , A , A , A , A )
self.assertNotEqual(A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(A , scheduler.state_dict() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
# Save 11 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
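The script above round-trips optimizer state across CPU and GPU through `Accelerator.save_state`/`load_state`. A minimal sketch of the same round trip, assuming the public `accelerate` API and an illustrative project directory:

```python
# Minimal checkpoint round trip with accelerate (paths and sizes are illustrative).
import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

accelerator = Accelerator(
    project_dir="/tmp/accelerate_demo",
    project_config=ProjectConfiguration(automatic_checkpoint_naming=True),
)
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state()  # writes <project_dir>/checkpoints/checkpoint_0
# ... train ...
accelerator.load_state("/tmp/accelerate_demo/checkpoints/checkpoint_0")
```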
| 263 | 0 |
'''simple docstring'''
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return 1 / (1 + np.exp(-z ))
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
return (-y * np.log(UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : str = np.dot(UpperCamelCase , UpperCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(UpperCamelCase ) ) )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=70000 ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = np.zeros(x.shape[1] )
for iterations in range(UpperCamelCase ):
lowerCAmelCase__ : Dict = np.dot(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = sigmoid_function(UpperCamelCase )
lowerCAmelCase__ : Any = np.dot(x.T , h - y ) / y.size
lowerCAmelCase__ : Dict = theta - alpha * gradient # updating the weights
lowerCAmelCase__ : Dict = np.dot(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : int = sigmoid_function(UpperCamelCase )
lowerCAmelCase__ : Any = cost_function(UpperCamelCase , UpperCamelCase )
if iterations % 100 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
_lowerCAmelCase = datasets.load_iris()
_lowerCAmelCase = iris.data[:, :2]
_lowerCAmelCase = (iris.target != 0) * 1
_lowerCAmelCase = 0.1
_lowerCAmelCase = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing theta, i.e. our weight vector
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return sigmoid_function(
np.dot(UpperCamelCase , UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((_lowerCAmelCase) , (_lowerCAmelCase)) = (x[:, 0].min(), x[:, 0].max())
((_lowerCAmelCase) , (_lowerCAmelCase)) = (x[:, 1].min(), x[:, 1].max())
((_lowerCAmelCase) , (_lowerCAmelCase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_lowerCAmelCase = np.c_[xxa.ravel(), xxa.ravel()]
_lowerCAmelCase = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
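The training loop above is full-batch gradient descent on the logistic loss, obscured by the generated variable names. A de-obfuscated sketch of the same core update:

```python
import numpy as np

def sigmoid(z: np.ndarray) -> np.ndarray:
    return 1.0 / (1.0 + np.exp(-z))

def fit_logistic(x: np.ndarray, y: np.ndarray, alpha: float = 0.1,
                 max_iterations: int = 70_000) -> np.ndarray:
    theta = np.zeros(x.shape[1])
    for _ in range(max_iterations):
        h = sigmoid(x @ theta)              # predicted probabilities
        gradient = x.T @ (h - y) / y.size   # gradient of the mean cross-entropy
        theta -= alpha * gradient           # full-batch gradient step
    return theta
```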
| 37 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :str = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class _SCREAMING_SNAKE_CASE ( _a ):
def __init__( self : Dict , __lowerCamelCase : Any=0.01 , __lowerCamelCase : Optional[Any]=1_000 ):
UpperCamelCase :Dict = p_stop
UpperCamelCase :Dict = max_length
def __iter__( self : List[str] ):
UpperCamelCase :Optional[Any] = 0
UpperCamelCase :int = False
while not stop and count < self.max_length:
yield count
count += 1
UpperCamelCase :List[Any] = random.random() < self.p_stop
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Optional[Any]=True ):
UpperCamelCase :str = [
BatchSamplerShard(__lowerCamelCase , 2 , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
for i in range(2 )
]
UpperCamelCase :Union[str, Any] = [list(__lowerCamelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__lowerCamelCase ) for shard in batch_sampler_shards] , [len(__lowerCamelCase ) for e in expected] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : List[str] ):
# Check the shards when the dataset is a round multiple of total batch size.
UpperCamelCase :Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCamelCase :Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batches.
UpperCamelCase :Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batches.
UpperCamelCase :Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
# Check the shards when the dataset is very small.
UpperCamelCase :List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Dict ):
# Check the shards when the dataset is a round multiple of batch size.
UpperCamelCase :Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
UpperCamelCase :List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCamelCase :Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
UpperCamelCase :Tuple = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCamelCase :int = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
UpperCamelCase :Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :List[str] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
UpperCamelCase :Dict = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :int = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
def _A ( self : Dict ):
# Check the shards when the dataset is a round multiple of total batch size.
UpperCamelCase :str = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
UpperCamelCase :Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCamelCase :Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
UpperCamelCase :int = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batches.
UpperCamelCase :Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
UpperCamelCase :Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batches.
UpperCamelCase :List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
UpperCamelCase :str = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
UpperCamelCase :Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Tuple = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
UpperCamelCase :Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase )
UpperCamelCase :Optional[int] = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
def _A ( self : List[Any] ):
# Check the shards when the dataset is a round multiple of batch size.
UpperCamelCase :Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
UpperCamelCase :Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCamelCase :int = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCamelCase :str = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
UpperCamelCase :int = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
UpperCamelCase :List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :str = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
UpperCamelCase :List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :Tuple = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
def _A ( self : Dict ):
UpperCamelCase :Tuple = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
UpperCamelCase :int = [BatchSamplerShard(__lowerCamelCase , 2 , __lowerCamelCase , even_batches=__lowerCamelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def _A ( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Tuple=False ):
random.seed(__lowerCamelCase )
UpperCamelCase :Any = list(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = [
IterableDatasetShard(
__lowerCamelCase , batch_size=__lowerCamelCase , drop_last=__lowerCamelCase , num_processes=__lowerCamelCase , process_index=__lowerCamelCase , split_batches=__lowerCamelCase , )
for i in range(__lowerCamelCase )
]
UpperCamelCase :Union[str, Any] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__lowerCamelCase )
iterable_dataset_lists.append(list(__lowerCamelCase ) )
UpperCamelCase :List[str] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
UpperCamelCase :Dict = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
self.assertTrue(len(__lowerCamelCase ) % shard_batch_size == 0 )
UpperCamelCase :List[str] = []
for idx in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__lowerCamelCase ) < len(__lowerCamelCase ):
reference += reference
self.assertListEqual(__lowerCamelCase , reference[: len(__lowerCamelCase )] )
def _A ( self : int ):
UpperCamelCase :Optional[int] = 42
UpperCamelCase :Optional[Any] = RandomIterableDataset()
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
# Edge case with a very small dataset
UpperCamelCase :Optional[int] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
self.check_iterable_dataset_shards(__lowerCamelCase , __lowerCamelCase , batch_size=4 , drop_last=__lowerCamelCase , split_batches=__lowerCamelCase )
def _A ( self : Tuple ):
UpperCamelCase :Union[str, Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=__lowerCamelCase )
UpperCamelCase :Any = SkipBatchSampler(__lowerCamelCase , 2 )
self.assertListEqual(list(__lowerCamelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _A ( self : Optional[Any] ):
UpperCamelCase :Tuple = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _A ( self : int ):
UpperCamelCase :Dict = DataLoader(list(range(16 ) ) , batch_size=4 )
UpperCamelCase :Union[str, Any] = skip_first_batches(__lowerCamelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _A ( self : Any ):
UpperCamelCase :Union[str, Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(__lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def _A ( self : List[str] ):
Accelerator()
UpperCamelCase :Optional[int] = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(__lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
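Of the helpers exercised above, `skip_first_batches` is the one most often used directly when resuming training mid-epoch; a minimal sketch matching the expected values in the test:

```python
from torch.utils.data import DataLoader
from accelerate.data_loader import skip_first_batches

dataloader = DataLoader(list(range(16)), batch_size=4)
resumed = skip_first_batches(dataloader, num_batches=2)
print([t.tolist() for t in resumed])  # [[8, 9, 10, 11], [12, 13, 14, 15]]
```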
| 38 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "xlm"
UpperCamelCase__ = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self , UpperCAmelCase=3_0145 , UpperCAmelCase=2048 , UpperCAmelCase=12 , UpperCAmelCase=16 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=1 , UpperCAmelCase=True , UpperCAmelCase=512 , UpperCAmelCase=2048**-0.5 , UpperCAmelCase=1e-12 , UpperCAmelCase=0.02 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=5 , UpperCAmelCase=True , UpperCAmelCase="first" , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=0.1 , UpperCAmelCase=5 , UpperCAmelCase=5 , UpperCAmelCase=0 , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase=0 , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = emb_dim
_UpperCAmelCase = n_layers
_UpperCAmelCase = n_heads
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = gelu_activation
_UpperCAmelCase = sinusoidal_embeddings
_UpperCAmelCase = causal
_UpperCAmelCase = asm
_UpperCAmelCase = n_langs
_UpperCAmelCase = use_lang_emb
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = bos_index
_UpperCAmelCase = eos_index
_UpperCAmelCase = pad_index
_UpperCAmelCase = unk_index
_UpperCAmelCase = mask_index
_UpperCAmelCase = is_encoder
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = embed_init_std
_UpperCAmelCase = init_std
_UpperCAmelCase = summary_type
_UpperCAmelCase = summary_use_proj
_UpperCAmelCase = summary_activation
_UpperCAmelCase = summary_proj_to_labels
_UpperCAmelCase = summary_first_dropout
_UpperCAmelCase = start_n_top
_UpperCAmelCase = end_n_top
_UpperCAmelCase = mask_token_id
_UpperCAmelCase = lang_id
if "n_words" in kwargs:
_UpperCAmelCase = kwargs['n_words']
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , **UpperCAmelCase )
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
@property
def UpperCamelCase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 39 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =IFImgaImgSuperResolutionPipeline
a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
a__ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __lowerCAmelCase ( self ) -> List[str]:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Any = torch.manual_seed(A )
else:
_UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Due to non-determinism in save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 263 | 0 |
"""simple docstring"""
def lowercase ( A_ , A_ , A_ , A_ )-> List[Any]:
'''simple docstring'''
a : List[Any] = [False] * len(A_ )
a : int = []
queue.append(A_ )
a : int = True
while queue:
a : List[str] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(A_ )
a : Dict = True
a : Optional[int] = u
return visited[t]
def lowercase ( A_ , A_ , A_ )-> str:
'''simple docstring'''
a : int = [-1] * (len(A_ ))
a : List[Any] = 0
while bfs(A_ , A_ , A_ , A_ ):
a : Tuple = float("Inf" )
a : List[str] = sink
while s != source:
# Find the minimum residual capacity along the selected augmenting path
a : List[Any] = min(A_ , graph[parent[s]][s] )
a : str = parent[s]
max_flow += path_flow
a : Optional[Any] = sink
while v != source:
a : List[Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
a : str = parent[v]
return max_flow
__lowercase = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__lowercase , __lowercase = 0, 5
print(ford_fulkerson(graph, source, sink))
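The network above is the classic six-node example from CLRS; its maximum flow is 23 (augmenting paths such as 0→1→3→5 and 0→2→4→5). Since the implementation mutates its input matrix in place, a check needs a fresh copy; the expected value is an assumption stated in the comment:

```python
# Sanity check on a fresh copy of the classic network (assumed answer: 23).
fresh_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(fresh_graph, 0, 5) == 23  # the algorithm consumes its input
```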
| 40 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or number < 0:
raise ValueError('''Input must be a non-negative integer''' )
_UpperCAmelCase : str = 0
while number:
# Clearing the lowest set bit jumps straight to the next 1 instead of
# looping through each bit and checking for 1s, so the loop runs once
# per set bit rather than 32 times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
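The `number &= number - 1` step is Brian Kernighan's trick: subtracting 1 flips the lowest set bit and everything below it, so the AND clears exactly that bit. A short demonstration:

```python
n = 0b1011010                    # 90, four set bits
assert n & (n - 1) == 0b1011000  # lowest 1 cleared

def popcount(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        count += 1
    return count

assert popcount(90) == 4
```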
| 263 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> float:
lowerCamelCase__ : Tuple = 0
while len(UpperCamelCase ) > 1:
lowerCamelCase__ : Dict = 0
# Greedily pick the two files with minimum cost and merge them
for _ in range(2 ):
lowerCamelCase__ : Dict = files.index(min(UpperCamelCase ) )
temp += files[min_index]
files.pop(UpperCamelCase )
files.append(UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
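The linear `files.index(min(files))` scan above makes each merge O(n), so the whole routine is O(n²). The same greedy policy (always merge the two smallest files, as in Huffman coding) is usually written with a min-heap for O(n log n); a sketch:

```python
import heapq

def optimal_merge_cost(files: list[int]) -> int:
    heapq.heapify(files)  # O(n)
    total = 0
    while len(files) > 1:
        merged = heapq.heappop(files) + heapq.heappop(files)
        total += merged
        heapq.heappush(files, merged)
    return total

assert optimal_merge_cost([2, 3, 4]) == 14  # (2+3)=5, then (5+4)=9; 5+9=14
```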
| 41 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
_UpperCAmelCase : int = OmegaConf.load(UpperCamelCase__ )
_UpperCAmelCase : str = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model''']
_UpperCAmelCase : Optional[Any] = list(state_dict.keys() )
# extract state_dict for VQVAE
_UpperCAmelCase : Any = {}
_UpperCAmelCase : Any = '''first_stage_model.'''
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase : Dict = state_dict[key]
# extract state_dict for UNetLDM
_UpperCAmelCase : Tuple = {}
_UpperCAmelCase : int = '''model.diffusion_model.'''
for key in keys:
if key.startswith(UpperCamelCase__ ):
_UpperCAmelCase : Dict = state_dict[key]
_UpperCAmelCase : List[str] = config.model.params.first_stage_config.params
_UpperCAmelCase : Union[str, Any] = config.model.params.unet_config.params
_UpperCAmelCase : Any = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_UpperCAmelCase : int = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_UpperCAmelCase : Optional[Any] = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
_lowerCAmelCase :List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 263 | 0 |
'''simple docstring'''
from math import factorial
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> float:
if successes > trials:
raise ValueError('successes must be lower than or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(__A , __A ) or not isinstance(__A , __A ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in the range 0 - 1' )
_snake_case = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_snake_case = float(factorial(__A ) )
coefficient /= factorial(__A ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
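A quick check of the printed example: P(2 successes in 4 trials at p = 0.75) = C(4, 2) · 0.75² · 0.25² = 6 · 0.5625 · 0.0625 = 0.2109375. The standard library gives the binomial coefficient directly:

```python
from math import comb

n, k, p = 4, 2, 0.75
probability = comb(n, k) * p**k * (1 - p) ** (n - k)
print(probability)  # 0.2109375
```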
| 42 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :List[str] = logging.get_logger(__name__)
_lowerCAmelCase :Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''falcon'''
a__ =['''past_key_values''']
def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any:
_UpperCAmelCase : int = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A )
_UpperCAmelCase : int = hidden_size if n_embed is None else n_embed
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[int] = layer_norm_epsilon
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[int] = use_cache
_UpperCAmelCase : Any = hidden_dropout
_UpperCAmelCase : Dict = attention_dropout
_UpperCAmelCase : Any = bos_token_id
_UpperCAmelCase : List[Any] = eos_token_id
_UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase : Dict = alibi
_UpperCAmelCase : Optional[int] = new_decoder_architecture
_UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase : Optional[int] = parallel_attn
_UpperCAmelCase : Optional[int] = bias
super().__init__(bos_token_id=A , eos_token_id=A , **A )
@property
def __lowerCAmelCase ( self ) -> List[str]:
return self.hidden_size // self.num_attention_heads
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return not self.alibi
| 263 | 0 |
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
__lowercase = get_logger(__name__)
class lowerCamelCase_ ( enum.Enum ):
'''simple docstring'''
a__ : Union[str, Any] = """all_checks"""
a__ : List[Any] = """basic_checks"""
a__ : List[Any] = """no_checks"""
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if expected_checksums is None:
logger.info('''Unable to verify checksums.''' )
return
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise UnexpectedDownloadedFile(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
__UpperCamelCase :int = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
__UpperCamelCase :Any = ''' for ''' + verification_name if verification_name is not None else ''''''
if len(SCREAMING_SNAKE_CASE ) > 0:
raise NonMatchingChecksumError(
f"""Checksums didn't match{for_verification_name}:\n"""
f"""{bad_urls}\n"""
'''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' )
logger.info('''All the checksums matched successfully''' + for_verification_name )
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if expected_splits is None:
logger.info('''Unable to verify splits sizes.''' )
return
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise ExpectedMoreSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise UnexpectedSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
__UpperCamelCase :Union[str, Any] = [
{'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(SCREAMING_SNAKE_CASE ) > 0:
raise NonMatchingSplitsSizesError(str(SCREAMING_SNAKE_CASE ) )
logger.info('''All the splits matched successfully.''' )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True ):
'''simple docstring'''
if record_checksum:
__UpperCamelCase :List[Any] = shaaaa()
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B'''''' ):
m.update(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = m.hexdigest()
else:
__UpperCamelCase :Optional[int] = None
return {"num_bytes": os.path.getsize(SCREAMING_SNAKE_CASE ), "checksum": checksum}
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
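The checksum helper above streams each file through SHA-256 in 1 MiB chunks rather than reading it whole (the obfuscated `shaaaa` import stands in for hashlib's `sha256`). The underlying standard-library pattern, with an illustrative function name and signature:

```python
import hashlib
import os

def size_and_sha256(path: str) -> dict:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB at a time
            digest.update(chunk)
    return {"num_bytes": os.path.getsize(path), "checksum": digest.hexdigest()}
```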
| 43 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_lowerCAmelCase :int = ['small', 'medium', 'large']
_lowerCAmelCase :int = 'lm_head.decoder.weight'
_lowerCAmelCase :Dict = 'lm_head.weight'
def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ):
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ )
_UpperCAmelCase : List[str] = d.pop(UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
if __name__ == "__main__":
_lowerCAmelCase :Dict = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
_lowerCAmelCase :str = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
_lowerCAmelCase :int = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 263 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class __A ( nn.Module ):
_UpperCamelCase : int
_UpperCamelCase : jnp.dtype = jnp.floataa
def __A ( self ):
_lowerCAmelCase : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , a__ ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = hidden_states.shape
_lowerCAmelCase : List[Any] = jax.image.resize(
a__ , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
_lowerCAmelCase : str = self.conv(a__ )
return hidden_states
class __A ( nn.Module ):
_UpperCamelCase : int
_UpperCamelCase : jnp.dtype = jnp.floataa
def __A ( self ):
_lowerCAmelCase : List[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , a__ ):
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_lowerCAmelCase : Optional[int] = self.conv(a__ )
return hidden_states
class __A ( nn.Module ):
_UpperCamelCase : int
_UpperCamelCase : int = None
_UpperCamelCase : float = 0.0
_UpperCamelCase : bool = None
_UpperCamelCase : jnp.dtype = jnp.floataa
def __A ( self ):
_lowerCAmelCase : Tuple = self.in_channels if self.out_channels is None else self.out_channels
_lowerCAmelCase : List[str] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_lowerCAmelCase : Tuple = nn.Conv(
a__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_lowerCAmelCase : Optional[Any] = nn.Dense(a__ , dtype=self.dtype )
_lowerCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_lowerCAmelCase : Any = nn.Dropout(self.dropout_prob )
_lowerCAmelCase : Optional[Any] = nn.Conv(
a__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_lowerCAmelCase : Any = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_lowerCAmelCase : str = None
if use_nin_shortcut:
_lowerCAmelCase : Dict = nn.Conv(
a__ , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self , a__ , a__ , a__=True ):
_lowerCAmelCase : Union[str, Any] = hidden_states
_lowerCAmelCase : Union[str, Any] = self.norma(a__ )
_lowerCAmelCase : Union[str, Any] = nn.swish(a__ )
_lowerCAmelCase : Dict = self.conva(a__ )
_lowerCAmelCase : Any = self.time_emb_proj(nn.swish(a__ ) )
_lowerCAmelCase : int = jnp.expand_dims(jnp.expand_dims(a__ , 1 ) , 1 )
_lowerCAmelCase : Optional[Any] = hidden_states + temb
_lowerCAmelCase : Optional[Any] = self.norma(a__ )
_lowerCAmelCase : int = nn.swish(a__ )
_lowerCAmelCase : Optional[int] = self.dropout(a__ , a__ )
_lowerCAmelCase : List[Any] = self.conva(a__ )
if self.conv_shortcut is not None:
_lowerCAmelCase : Tuple = self.conv_shortcut(a__ )
return hidden_states + residual
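These are `flax.linen` modules (the three classes appear to mirror diffusers' `FlaxUpsample2D`, `FlaxDownsample2D`, and `FlaxResnetBlock2D`, with names and fields obfuscated), so they are driven through `init`/`apply` rather than called directly. A usage sketch under that assumption, using the upsample block with its assumed diffusers field names:

```python
import jax
import jax.numpy as jnp

# Assumes the first module above is FlaxUpsample2D with an `out_channels` field.
block = FlaxUpsample2D(out_channels=8)
x = jnp.ones((1, 16, 16, 8))                  # NHWC, as the shape unpacking implies
params = block.init(jax.random.PRNGKey(0), x)
y = block.apply(params, x)
print(y.shape)                                # (1, 32, 32, 8): H and W doubled
```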
| 44 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
_lowerCAmelCase :Tuple = tuple[int, int]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A , A ) -> None:
_UpperCAmelCase : set[int] = vertices
_UpperCAmelCase : dict[EdgeT, int] = {
(min(A ), max(A )): weight for edge, weight in edges.items()
}
def __lowerCAmelCase ( self , A , A ) -> None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
_UpperCAmelCase : List[Any] = weight
def __lowerCAmelCase ( self ) -> Graph:
_UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} )
_UpperCAmelCase : EdgeT
_UpperCAmelCase : int
_UpperCAmelCase : EdgeT
_UpperCAmelCase : int
while len(subgraph.vertices ) < len(self.vertices ):
_UpperCAmelCase : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_UpperCAmelCase : Tuple = edge
_UpperCAmelCase : Optional[int] = weight
subgraph.add_edge(A , A )
return subgraph
def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ):
_UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) )
_UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : dict[EdgeT, int] = {}
_UpperCAmelCase : list[str]
_UpperCAmelCase : int
_UpperCAmelCase : int
with open(UpperCamelCase__ ) as f:
_UpperCAmelCase : str = f.read().strip().split('''\n''' )
_UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(UpperCamelCase__ ) ):
for edgea in range(UpperCamelCase__ ):
if adjaceny_matrix[edgea][edgea] != "-":
_UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] )
_UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ )
_UpperCAmelCase : Graph = graph.prims_algorithm()
_UpperCAmelCase : int = sum(graph.edges.values() )
_UpperCAmelCase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 263 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a=None , _a=None , *_a , **_a ):
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
__a = self.model.config
else:
__a = config
__a = data_args
__a = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
''' padding.''' )
if self.args.label_smoothing == 0:
__a = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__a = label_smoothed_nll_loss
def __UpperCAmelCase ( self , _a ):
if self.optimizer is None:
__a = ['''bias''', '''LayerNorm.weight''']
__a = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
__a = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__a = Adafactor
__a = {'''scale_parameter''': False, '''relative_step''': False}
else:
__a = AdamW
__a = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
__a = self.args.learning_rate
if self.sharded_ddp:
__a = OSS(
params=_a , optim=_a , **_a , )
else:
__a = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
__a = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def __UpperCAmelCase ( self , _a ):
__a = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__a = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__a = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
__a = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def __UpperCAmelCase ( self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __UpperCAmelCase ( self , _a , _a , _a ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
__a = model(**_a , use_cache=_a )[0]
__a = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
__a , __a = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
__a = model(**_a , use_cache=_a )[0]
__a = torch.nn.functional.log_softmax(_a , dim=-1 )
__a , __a = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def __UpperCAmelCase ( self , _a , _a ):
__a = inputs.pop('''labels''' )
__a , __a = self._compute_loss(_a , _a , _a )
return loss
def __UpperCAmelCase ( self , _a , _a , _a , _a = None , ):
__a = self._prepare_inputs(_a )
__a = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
__a = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
__a = self._pad_tensors_to_max_len(_a , gen_kwargs['''max_length'''] )
__a = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
__a , __a = self._compute_loss(_a , _a , _a )
__a = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
__a = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
__a = self._pad_tensors_to_max_len(_a , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def __UpperCAmelCase ( self , _a , _a ):
# If PAD token is not defined, at least EOS token has to be defined
__a = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f''' padded to `max_length`={max_length}''' )
__a = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
__a = tensor
return padded_tensor
| 45 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _UpperCAmelCase ( PretrainedConfig ):
    '''simple docstring'''

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
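# Minimal usage sketch for the configuration above, assuming a transformers release
# that ships MgpstrConfig under that public name; the overridden values are
# arbitrary examples, not recommended settings.
from transformers import MgpstrConfig

config = MgpstrConfig(max_token_length=27, hidden_size=768)
print(config.model_type, config.num_character_labels)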
| 263 | 0 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """Derive encoder/decoder configs from the original Donut model's config."""
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
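# Standalone sketch of the fused-qkv splitting used in convert_state_dict above: a
# (3*dim, dim) qkv weight is cut into equal query/key/value chunks. dim is a tiny
# made-up value, not a real Donut dimension.
import torch

dim = 4
qkv_weight = torch.randn(3 * dim, dim)
query, key, value = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)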
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 46 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
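# Quick hand-checked example for the minimax above: on the depth-2 tree with leaves
# [3, 5, 2, 9], the maximizer gets max(min(3, 5), min(2, 9)) == 3.
demo_scores = [3, 5, 2, 9]
demo_height = math.log(len(demo_scores), 2)
assert minimax(0, 0, True, demo_scores, demo_height) == 3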
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class ASTConfig( PretrainedConfig ):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
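# Back-of-the-envelope sketch of how the strides above tile a spectrogram with
# overlapping 16x16 patches. This mirrors the usual AST patch-count formula; treat
# it as an assumption about the model, not the library's exact code. Values come
# from the defaults above.
num_mel_bins, max_length, patch_size = 128, 1024, 16
frequency_stride = time_stride = 10
freq_patches = (num_mel_bins - patch_size) // frequency_stride + 1  # 12
time_patches = (max_length - patch_size) // time_stride + 1  # 101
print(freq_patches * time_patches)  # 1212 patches per spectrogram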
| 47 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
_UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = generator.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = '''cyberpunk 2077'''
_UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger '''
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe.text_to_image(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
_UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images
_UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 263 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
SCREAMING_SNAKE_CASE__ : Any = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
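# Standalone sketch of the regex-driven renumbering in replace_keys above: layer 0
# of each per-mask MLP becomes proj_in and layer 2 becomes proj_out. The key string
# is a shortened example, not a real checkpoint key.
import re

pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
key = "mask_decoder.output_hypernetworks_mlps.3.layers.0.weight"
match = re.match(pattern, key)
if match and int(match.group(2)) == 0:
    key = key.replace("layers.0", "proj_in")
print(key)  # mask_decoder.output_hypernetworks_mlps.3.proj_in.weight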
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE__ : int = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 48 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowerCAmelCase :Any = False
@skip_mps
class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =StableDiffusionAttendAndExcitePipeline
a__ =False
a__ =TEXT_TO_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
a__ =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __lowerCAmelCase ( cls ) -> List[str]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , )
_UpperCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_UpperCAmelCase : List[str] = CLIPTextModel(A )
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : List[str] = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[str] = '''cpu'''
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : Dict = self.get_dummy_inputs(A )
_UpperCAmelCase : Union[str, Any] = pipe(**A ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
_UpperCAmelCase : int = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
_UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> str:
super().test_save_load_local(expected_max_difference=5E-4 )
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = torch.manual_seed(5_1 )
_UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
_UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses'''
_UpperCAmelCase : int = [5, 7]
_UpperCAmelCase : Dict = pipe(
prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 263 | 0 |
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
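# Brute-force cross-check for the closed-form count above; it enumerates hole
# widths of matching parity directly, so only use tiny limits. Written here purely
# as an illustrative sanity check.
def solution_brute(limit: int) -> int:
    count = 0
    for outer_width in range(3, limit):
        for hole_width in range(outer_width - 2, 0, -2):
            if outer_width**2 - hole_width**2 > limit:
                break
            count += 1
    return count

assert solution_brute(100) == solution(100)  # both count 41 laminae for <= 100 tiles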
if __name__ == "__main__":
print(f'{solution() = }')
| 49 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : List[str] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : List[Any] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A )
_UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' )
_UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A )
_UpperCAmelCase : Tuple = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
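# Minimal end-user sketch of the streaming API exercised by the tests above,
# assuming the same tiny test checkpoint is reachable; any causal LM works the
# same way in practice.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
streamer = TextIteratorStreamer(tok)
inputs = tok(["Hello"], return_tensors="pt")
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 5, "streamer": streamer}).start()
for chunk in streamer:
    print(chunk, end="")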
| 263 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = CpmAntTokenizer
UpperCAmelCase__ = False
def A_ ( self : List[str] ) -> Union[str, Any]:
super().setUp()
lowerCamelCase__ : List[Any] = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
lowerCamelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def A_ ( self : Union[str, Any] ) -> Tuple:
lowerCamelCase__ : int = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
lowerCamelCase__ : Any = '今天天气真好!'
lowerCamelCase__ : Dict = ['今天', '天气', '真', '好', '!']
lowerCamelCase__ : Any = tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : List[str] = '今天天气真好!'
lowerCamelCase__ : List[str] = [tokenizer.bos_token] + tokens
lowerCamelCase__ : int = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase )
lowerCamelCase__ : str = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 50 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
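# Sanity check for the quadrature-based gamma above: Gamma(5) = 4! = 24, up to the
# numerical error of the improper integral.
assert abs(gamma(5) - 24.0) < 1e-4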
if __name__ == "__main__":
from doctest import testmod
testmod()
| 263 | 0 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig( SageMakerConfig ):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class __snake_case ( unittest.TestCase ):
    def test_args_convert(self):
        """simple docstring"""
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
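# Illustrative toy version of what a nargs-to-dict conversion has to handle (this
# is NOT accelerate's implementation): bare flags become booleans, "False" stays a
# bool, and numeric strings are narrowed to int/float.
def nargs_to_dict(args):
    out, key = {}, None
    for token in args:
        if token.startswith("--"):
            key = token[2:]
            out[key] = True  # bare flag until a value follows
        else:
            value = token
            if value in ("True", "False"):
                value = value == "True"
            else:
                try:
                    value = int(value)
                except ValueError:
                    try:
                        value = float(value)
                    except ValueError:
                        pass
            out[key] = value
    return out

print(nargs_to_dict(["--do_train", "False", "--epochs", "3", "--max_steps", "50.5"]))
# {'do_train': False, 'epochs': 3, 'max_steps': 50.5}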
| 51 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
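# Example run of the string-based OR above: 25 | 32 == 57 == 0b111001.
assert binary_or(25, 32) == "0b111001"
assert int(binary_or(25, 32), 2) == 25 | 32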
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file,
    eval_file,
    test_file,
    tokenizer,
    label_column_id,
    max_seq_length=None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)

    return results
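# Sketch of a CSV the script above can consume (column names are arbitrary; the
# label column is picked by index via --label_column_id). Written only to make the
# expected input format concrete.
import csv

with open("train.csv", "w", newline="") as f:
    csv.writer(f).writerows([("text", "label"), ("great movie", "pos"), ("terrible plot", "neg")])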
if __name__ == "__main__":
main()
| 52 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
        'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMSNModel',
        'ViTMSNForImageClassification',
        'ViTMSNPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
'''simple docstring'''
import torch
from transformers import AutoModel
class snake_case ( torch.nn.Module ):
"""simple docstring"""
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
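# Isolated sketch of the temperature-scaled cosine-similarity softmax used by the
# module above; all shapes are made-up toy values, not FSNER's real dimensions.
import torch

cos = torch.nn.CosineSimilarity(dim=3, eps=1e-08)
softmax = torch.nn.Softmax(dim=1)
a = torch.randn(2, 4, 5, 8)  # (batch, candidates, tokens, hidden)
b = torch.randn(2, 4, 5, 8)
T = 1.0
probs = softmax(T * cos(a, b))  # (2, 4, 5), normalized over the candidate axis
print(probs.shape, probs.sum(dim=1)[0, 0].item())  # sums to 1 along dim 1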
| 53 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase :List[str] = '▁'
_lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCAmelCase :List[Any] = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
_lowerCAmelCase :Tuple = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class _UpperCAmelCase ( PreTrainedTokenizer ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCAmelCase ( self , A , A = None ) -> List[int]:
_UpperCAmelCase : Dict = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def __lowerCAmelCase ( self , A ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Any = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , A ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , A ) -> int:
_UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
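# Toy illustration of the fairseq/spm alignment documented in the comment table
# above: ids 0-3 are remapped and every other spm id is shifted by
# fairseq_offset = 1. Standalone sketch, not the tokenizer's actual method.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def spm_piece_to_fairseq_id(piece: str, spm_id: int) -> int:
    if piece in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[piece]
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

print(spm_piece_to_fairseq_id(",", 3))  # 4, matching the alignment table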
| 263 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class UpperCamelCase_ :
"""simple docstring"""
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
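# Standalone sketch (illustrative, not part of the tester above) of the round-up
# arithmetic used for `encoder_seq_length`: Longformer-style local attention pads the
# input so its length is a multiple of `attention_window`.
def _padded_length(seq_length, attention_window):
    return seq_length + (attention_window - seq_length % attention_window) % attention_window

assert _padded_length(7, 4) == 8  # padded up to the next multiple of 4
assert _padded_length(8, 4) == 8  # already a multiple, nothing added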
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
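# Usage sketch for the helper above (illustrative only; the token values are arbitrary
# examples). The derived `attention_mask` is 0 wherever `input_ids` equals
# `config.pad_token_id` and 1 elsewhere.
def _example_prepare_led_inputs(config):
    input_ids = tf.constant([[0, 31414, 232, 2, 1, 1]])          # two trailing pads
    decoder_input_ids = tf.constant([[2, 0, 31414, 232, 1, 1]])
    return prepare_led_inputs_dict(config, input_ids, decoder_input_ids)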
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"], )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self : List[str] ) -> str:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    '''simple docstring'''
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 54 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    '''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DonutImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
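# Generic illustration of the deprecation-shim idiom above (class names invented for
# this sketch): keep the old class importable, warn once on construction, and delegate
# everything to the replacement.
class _NewProcessor:
    def __init__(self, size=224):
        self.size = size

class _OldProcessor(_NewProcessor):  # deprecated alias kept for backward compatibility
    def __init__(self, *args, **kwargs):
        warnings.warn("_OldProcessor is deprecated; use _NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)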
| 263 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    """simple docstring"""
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection")
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        """simple docstring"""
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs, [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ], )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        """simple docstring"""
        pass
    @require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection")
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png", candidate_labels=["cat", "remote", "couch"], threshold=0.64, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ], threshold=0.64, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        """simple docstring"""
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ], )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        """simple docstring"""
        pass
    @require_torch
    @slow
    def test_threshold(self):
        """simple docstring"""
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], threshold=threshold, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        """simple docstring"""
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], top_k=top_k, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
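# Minimal usage sketch of the pipeline exercised above (illustrative; running it
# downloads the default checkpoint, and the threshold value is just an example).
def _example_zero_shot_detection():
    detector = pipeline("zero-shot-object-detection")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.2,  # drop low-confidence boxes
    )
    for pred in predictions:
        print(pred["label"], round(pred["score"], 3), pred["box"])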
| 55 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['''model_config'''])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('''<ent>''', lstrip=False, rstrip=False)
    entity_token_2 = AddedToken('''<ent2>''', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(F'Saving tokenizer to {pytorch_dump_folder_path}')
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names['''entity_vocab_file''']), '''w''') as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict['''embeddings.word_embeddings.weight''']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['''@'''])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(['''#'''])[0]].unsqueeze(0)
    state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, ent2_emb])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
    entity_emb[entity_vocab['''[MASK2]''']] = entity_emb[entity_vocab['''[MASK]''']]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F'Missing keys {", ".join(missing_keys)}. Expected only missing embeddings.position_ids')
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='''entity_classification''')
    text = (
        '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
        ''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors='''pt''')
    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}')
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            F' {expected_shape}')
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, '''r''', encoding='''utf-8''') as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split('''\t''')
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
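# Standalone sketch (made-up sizes, not part of the conversion script) of the
# embedding-extension step above: rows for the new <ent> and <ent2> tokens are
# initialised from the '@' and '#' embeddings rather than randomly, so the extended
# matrix stays close to the pretrained distribution.
def _example_extend_word_embeddings():
    vocab_size, hidden = 8, 4
    word_emb = torch.randn(vocab_size, hidden)
    at_id, hash_id = 3, 5                      # ids of '@' and '#' in this toy vocab
    ent_emb = word_emb[at_id].unsqueeze(0)
    ent2_emb = word_emb[hash_id].unsqueeze(0)
    extended = torch.cat([word_emb, ent_emb, ent2_emb])
    assert extended.shape == (vocab_size + 2, hidden)
    return extended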
| 263 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''An astronaut riding an elephant''',
            '''source_prompt''': '''An astronaut riding a horse''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''eta''': 0.1,
            '''strength''': 0.8,
            '''guidance_scale''': 3,
            '''source_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''')
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, '''half'''):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip('''non-deterministic pipeline''')
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''')
        init_image = init_image.resize((512, 512))
        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='''scheduler''')
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision='''fp16''')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type='''np''', )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''')
        init_image = init_image.resize((512, 512))
        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='''scheduler''')
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type='''np''', )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
| 56 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + '''$''') for x in qs))
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''', None)),
        (("transformer", "wte", "embedding"), P('''mp''', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, '''mp''')),
        (("attention", "out_proj", "kernel"), P('''mp''', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, '''mp''')),
        (("mlp", "c_fc", "bias"), P('''mp''')),
        (("mlp", "c_proj", "kernel"), P('''mp''', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
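# Usage sketch for `set_partitions` (illustrative; the parameter tree below is made up,
# with placeholder leaves instead of real arrays). Matched keys receive the rule's
# PartitionSpec; any key left as `_unmatched` trips the assertion above.
def _example_set_partitions():
    params = {
        "transformer": {
            "wte": {"embedding": 0},
            "ln_f": {"bias": 0, "scale": 0},
        }
    }
    specs = set_partitions(params)
    return flatten_dict(specs)[("transformer", "wte", "embedding")]  # P("mp", None)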
| 263 | 0 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    '''simple docstring'''
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp_position = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_position

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    '''simple docstring'''
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
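# Usage sketch on a small weighted graph (illustrative only); each adjacency entry is
# [neighbour, weight], matching the input format read in the __main__ block below.
def _example_minimum_spanning_tree():
    graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 7), (2, 4, 3)]:
        graph[u].append([v, w])
        graph[v].append([u, w])
    return prisms_algorithm(graph)  # e.g. [(0, 1), (1, 2), (2, 4), (1, 3)]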
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 57 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    '''simple docstring'''
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''', model='''hf-internal-testing/tiny-clap-htsat-unfused''')
        dataset = load_dataset('''ashraq/esc50''')
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output), [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}], )
    @unittest.skip('''No models are available in TF''')
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''', model='''laion/clap-htsat-unfused''', )
        # This is an audio of a dog
        dataset = load_dataset('''ashraq/esc50''')
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output), [
                {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
            ], )
        output = audio_classifier([audio] * 5, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output), [
                [
                    {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5, )
        output = audio_classifier(
            [audio] * 5, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''], batch_size=5)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5, )
    @unittest.skip('''No models are available in TF''')
    def test_large_model_tf(self):
        pass
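# Minimal usage sketch of the pipeline exercised above (illustrative; the checkpoint
# name comes from the slow test, and running this requires downloading that model).
def _example_zero_shot_audio(audio_array):
    classifier = pipeline(task='''zero-shot-audio-classification''', model='''laion/clap-htsat-unfused''')
    return classifier(audio_array, candidate_labels=['''Sound of a dog''', '''Sound of rain'''])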
| 263 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class CodeGenConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''codegen'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(self, vocab_size=50400, n_ctx=2048, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs, ) -> None:
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class CodeGenOnnxConfig(OnnxConfigWithPast):
    '''simple docstring'''
    def __init__(self, config, task="default", patching_specs=None, use_past=False, ) -> None:
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, """pad_token_id""", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""")
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch

                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
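# Standalone sketch (made-up numbers, not part of the config class) of the dummy
# `past_key_values` shape computed above: one (key, value) pair of zeros per layer,
# with the head dimension equal to hidden_size // num_attention_heads.
def _example_past_shape(batch=2, seqlen=5, n_head=16, n_embd=4096):
    past_key_values_length = seqlen + 2  # deliberately not the input length
    return (batch, n_head, past_key_values_length, n_embd // n_head)

assert _example_past_shape() == (2, 16, 7, 256)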
| 58 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    '''simple docstring'''
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
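# Sketch of the save/load round trip the tests below exercise (illustrative; the
# target directory is a caller-supplied path).
def _example_checkpoint_round_trip(tmpdir):
    accelerator = Accelerator()
    model = DummyModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    model, optimizer = accelerator.prepare(model, optimizer)
    accelerator.save_state(tmpdir)  # writes model, optimizer and RNG states
    accelerator.load_state(tmpdir)  # restores them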
class CheckpointTest(unittest.TestCase):
    '''simple docstring'''
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            initial = os.path.join(tmpdir, '''initial''')
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, '''checkpoint''')
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(os.path.join(tmpdir, '''checkpoints''', '''checkpoint_0'''))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, '''checkpoints''', '''checkpoint_1'''))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue('''Item at index 0''' in message)
        self.assertTrue('''Item at index 1''' in message)
        self.assertFalse('''Item at index 2''' in message)
        self.assertFalse('''Item at index 3''' in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler)
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, '''checkpoints''', '''checkpoint_0'''))
            self.assertEqual(scheduler_state, scheduler.state_dict())
def __lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_UpperCAmelCase : int = DummyModel()
_UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 )
# Train baseline
_UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A )
_UpperCAmelCase : Optional[Any] = accelerator.prepare(A )
# Save 11 states; with total_limit=2 only the two most recent checkpoints are kept:
for _ in range(1_1 ):
accelerator.save_state()
self.assertFalse(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing'
_lowerCAmelCase :Any = DummyModel()
_lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3)
_lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders()
_lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCAmelCase :int = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowerCAmelCase :Dict = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowerCAmelCase :List[Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowerCAmelCase :Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 263 | 0 |
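The row above exercises Accelerate's state-checkpointing API end to end. As a point of reference, a minimal sketch of the same save/load cycle follows; the toy model and the /tmp project directory are illustrative assumptions, not taken from the row:

import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

# automatic_checkpoint_naming saves to <project_dir>/checkpoints/checkpoint_<i>
project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
accelerator = Accelerator(project_dir="/tmp/ckpt_demo", project_config=project_config)
model = torch.nn.Linear(1, 1)  # stand-in for the tests' DummyModel
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state()  # writes checkpoints/checkpoint_0
# ... train for a few steps, then restore everything exactly ...
accelerator.load_state("/tmp/ckpt_demo/checkpoints/checkpoint_0")

Round-tripping through load_state is what lets the tests assert that model weights, optimizer state, scheduler state, and RNG streams all match after restoration.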
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCAmelCase ( A_ ):
A__ : List[Any] = "canine"
def __init__(self : Dict , snake_case__ : Dict=7_68 , snake_case__ : Tuple=12 , snake_case__ : Optional[int]=12 , snake_case__ : Optional[Any]=30_72 , snake_case__ : List[Any]="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : List[str]=1_63_84 , snake_case__ : List[Any]=16 , snake_case__ : List[Any]=0.02 , snake_case__ : Tuple=1e-12 , snake_case__ : Tuple=0 , snake_case__ : Optional[int]=0XE_0_0_0 , snake_case__ : Dict=0XE_0_0_1 , snake_case__ : int=4 , snake_case__ : Union[str, Any]=4 , snake_case__ : Union[str, Any]=8 , snake_case__ : List[str]=1_63_84 , snake_case__ : List[str]=1_28 , **snake_case__ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
snake_case : Any = max_position_embeddings
snake_case : List[Any] = hidden_size
snake_case : Optional[int] = num_hidden_layers
snake_case : Tuple = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : List[str] = hidden_act
snake_case : Tuple = hidden_dropout_prob
snake_case : List[str] = attention_probs_dropout_prob
snake_case : str = initializer_range
snake_case : int = type_vocab_size
snake_case : List[Any] = layer_norm_eps
# Character config:
snake_case : str = downsampling_rate
snake_case : Dict = upsampling_kernel_size
snake_case : List[str] = num_hash_functions
snake_case : Optional[int] = num_hash_buckets
snake_case : Dict = local_transformer_stride
| 59 |
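The row above is the CANINE model configuration, released in transformers as CanineConfig. A minimal usage sketch, with an arbitrary override chosen only to show the character-level knobs:

from transformers import CanineConfig, CanineModel

# Defaults mirror google/canine-s; the override below is purely illustrative
config = CanineConfig(num_hash_functions=8)
model = CanineModel(config)  # randomly initialized, not pretrained
print(config.downsampling_rate)  # character-to-molecule downsampling rate, 4 by default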
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :str = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
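The __init__ row above follows transformers' lazy-import convention: the package registers an _import_structure mapping up front, guards optional backends with try/except, and swaps itself for a _LazyModule so heavy submodules only import on first attribute access. A stripped-down sketch of the pattern, with hypothetical module and class names:

import sys
from transformers.utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_demo": ["DemoConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch absent: torch-backed classes are simply never registered
else:
    _import_structure["modeling_demo"] = ["DemoModel"]
# The module replaces itself with a lazy proxy; imports happen on first access
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)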
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def _snake_case ( _snake_case : float ):
    # gamma(z), computed by numerically integrating x ** (z - 1) * exp(-x) over [0, inf)
    if _snake_case <= 0:
        raise ValueError('''math domain error''' )
    return quad(_integrand , 0 , inf , args=(_snake_case,) )[0]
def _integrand ( _snake_case : float , _snake_case_z : float ):
    # integrand of the gamma function at x = _snake_case for parameter z = _snake_case_z
    return math.pow(_snake_case , _snake_case_z - 1 ) * math.exp(-_snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 60 |
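A quick sanity check for the gamma row above: for positive integers n, gamma(n) = (n - 1)!, and gamma(1/2) = sqrt(pi), so the quadrature should reproduce both up to small floating-point error. The calls below use the row's own (obfuscated) function name:

print(_snake_case(5))    # ~24.0, since gamma(5) = 4!
print(_snake_case(0.5))  # ~1.7724539, i.e. sqrt(pi)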
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
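The OPT row above wires up parallel PyTorch, TensorFlow, and Flax backends behind the same lazy-import guards. A minimal generation sketch for the PyTorch classes; facebook/opt-125m is just one convenient small checkpoint, not something the row mandates:

from transformers import AutoTokenizer, OPTForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
model = OPTForCausalLM.from_pretrained("facebook/opt-125m")
inputs = tokenizer("Hello, my name is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))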