code | code_codestyle | style_context | style_context_codestyle | label
|---|---|---|---|---|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool( PipelineTool ):
'''simple docstring'''
default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
description = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
name = "image_qa"
pre_processor_class = AutoProcessor
model_class = AutoModelForVisualQuestionAnswering
inputs = ["image", "text"]
outputs = ["text"]
def __init__( self , *args , **kwargs ):
'''simple docstring'''
requires_backends(self , ['vision'] )
super().__init__(*args , **kwargs )
def encode( self , image : "Image" , question : str ):
'''simple docstring'''
return self.pre_processor(image , question , return_tensors='pt' )
def forward( self , inputs ):
'''simple docstring'''
with torch.no_grad():
return self.model(**inputs ).logits
def decode( self , outputs ):
'''simple docstring'''
idx = outputs.argmax(-1 ).item()
return self.model.config.id2label[idx] | 282 |
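A minimal usage sketch for the tool above, relying on PipelineTool's __call__ to chain encode/forward/decode; the image path and question are illustrative.
from PIL import Image

tool = ImageQuestionAnsweringTool()
print(tool(Image.open('photo.jpg'), 'How many cats are there?'))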
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest( unittest.TestCase ):
'''simple docstring'''
def test_transform_and_reverse( self ):
'''simple docstring'''
model_id = 'hf-internal-testing/tiny-random-t5'
tokenizer = AutoTokenizer.from_pretrained(model_id )
model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
inp = tokenizer('This is me' , return_tensors='pt' )
model = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
output = model.generate(**inp )
model = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname )
model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
output_from_pretrained = model_reloaded.generate(**inp )
self.assertTrue(torch.allclose(output , output_from_pretrained ) )
def test_error_save_pretrained( self ):
'''simple docstring'''
model_id = 'hf-internal-testing/tiny-random-t5'
model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
model = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(ValueError ):
model.save_pretrained(tmpdirname )
model = model.reverse_bettertransformer()
model.save_pretrained(tmpdirname ) | 282 | 1 |
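For context, a minimal sketch of the round-trip the first test above exercises; the tiny checkpoint id comes from the test itself, the output directory is illustrative (requires optimum).
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
model = AutoModelForSeq2SeqLM.from_pretrained('hf-internal-testing/tiny-random-t5')
model = model.to_bettertransformer()        # swap in fused-attention modules
output = model.generate(**tokenizer('This is me', return_tensors='pt'))
model = model.reverse_bettertransformer()   # restore canonical modules before saving
model.save_pretrained('t5-roundtrip')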
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = RoFormerTokenizer
rust_tokenizer_class = RoFormerTokenizerFast
space_between_special_tokens = True
test_rust_tokenizer = True
def setUp( self ):
'''simple docstring'''
super().setUp()
def get_tokenizer( self , **kwargs ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **kwargs )
def get_rust_tokenizer( self , **kwargs ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **kwargs )
def get_chinese_input_output_texts( self ):
'''simple docstring'''
input_text = '永和服装饰品有限公司,今天天气非常好'
output_text = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
return input_text, output_text
def test_tokenizer( self ):
'''simple docstring'''
tokenizer = self.get_tokenizer()
input_text , output_text = self.get_chinese_input_output_texts()
tokens = tokenizer.tokenize(input_text )
self.assertListEqual(tokens , output_text.split() )
input_tokens = tokens + [tokenizer.unk_token]
exp_tokens = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
def test_rust_tokenizer( self ):
'''simple docstring'''
tokenizer = self.get_rust_tokenizer()
input_text , output_text = self.get_chinese_input_output_texts()
tokens = tokenizer.tokenize(input_text )
self.assertListEqual(tokens , output_text.split() )
input_tokens = tokens + [tokenizer.unk_token]
exp_tokens = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
def A ( self : Tuple ):
'''simple docstring'''
pass
def A ( self : Tuple ):
'''simple docstring'''
pass
def A ( self : Optional[Any] ):
'''simple docstring'''
pass | 282 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
model = UNet2DModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'{mod.modelId} has passed successfully!!!') | 282 | 1 |
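The dictionary key used in the assert above is just the model id with '/' and '-' folded into underscores; a quick check of that transform (the model id is illustrative):
model_id = 'google/ddpm-cifar10-32'
key = '_'.join('_'.join(model_id.split('/')).split('-'))
assert key == 'google_ddpm_cifar10_32'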
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline( DiffusionPipeline ):
'''simple docstring'''
def __init__( self , unet , scheduler ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
scheduler = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
'''simple docstring'''
if isinstance(self.unet.config.sample_size , int ):
image_shape = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(generator , list ) and len(generator ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(num_inference_steps )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
model_output = self.unet(image , t ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(
model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample
image = (image / 2 + 0.5).clamp(0 , 1 )
image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image ) | 282 |
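A minimal sketch of driving the pipeline above; diffusers' stock DDIM pipeline accepts DDPM checkpoints, and the checkpoint id here is illustrative.
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained('google/ddpm-cifar10-32')
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save('ddim_sample.png')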
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester( ConfigTester ):
'''simple docstring'''
def create_and_test_config_common_properties( self ):
'''simple docstring'''
config = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(config , 'tf_padding' ) )
self.parent.assertTrue(hasattr(config , 'depth_multiplier' ) )
class MobileNetV2ModelTester:
'''simple docstring'''
def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1_280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.depth_multiplier = depth_multiplier
self.depth_divisible_by = depth_divisible_by
self.min_depth = min_depth
self.expand_ratio = expand_ratio
self.tf_padding = tf_padding
self.output_stride = output_stride
self.first_layer_is_expansion = first_layer_is_expansion
self.finegrained_output = finegrained_output
self.hidden_act = hidden_act
self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
self.classifier_dropout_prob = classifier_dropout_prob
self.use_labels = use_labels
self.is_training = is_training
self.num_labels = num_labels
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs( self ):
'''simple docstring'''
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
pixel_labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels )
pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels, pixel_labels
def get_config( self ):
'''simple docstring'''
return MobileNetV2Config(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
'''simple docstring'''
model = MobileNetV2Model(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
'''simple docstring'''
config.num_labels = self.num_labels
model = MobileNetV2ForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
'''simple docstring'''
config.num_labels = self.num_labels
model = MobileNetV2ForSemanticSegmentation(config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels , pixel_labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileNetV2ModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": MobileNetV2Model,
"image-classification": MobileNetV2ForImageClassification,
"image-segmentation": MobileNetV2ForSemanticSegmentation,
}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp( self ):
'''simple docstring'''
self.model_tester = MobileNetV2ModelTester(self )
self.config_tester = MobileNetV2ConfigTester(self , config_class=MobileNetV2Config , has_text_modality=False )
def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def test_attention_outputs( self ):
'''simple docstring'''
pass
def test_forward_signature( self ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['pixel_values']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_hidden_states_output( self ):
'''simple docstring'''
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_stages = 16
self.assertEqual(len(hidden_states ) , expected_num_stages )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict['output_hidden_states'] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
def test_for_image_classification( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def test_for_semantic_segmentation( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = MobileNetV2Model.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img():
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor( self ):
'''simple docstring'''
return (
MobileNetV2ImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def test_inference_image_classification_head( self ):
'''simple docstring'''
model = MobileNetV2ForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
def test_inference_semantic_segmentation( self ):
'''simple docstring'''
model = MobileNetV2ForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
model = model.to(torch_device )
image_processor = MobileNetV2ImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
image = prepare_img()
inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , expected_shape )
expected_slice = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=torch_device , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) ) | 282 | 1 |
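A minimal sketch of the classification path the integration test above exercises; the checkpoint is the one named in the test, the image path is illustrative.
import torch
from PIL import Image
from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

processor = MobileNetV2ImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224')
model = MobileNetV2ForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224')
inputs = processor(images=Image.open('cats.png'), return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1001): ImageNet classes plus a background class
print(model.config.id2label[logits.argmax(-1).item()])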
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_lowerCamelCase : str = logging.get_logger(__name__)
class DPTFeatureExtractor( DPTImageProcessor ):
'''simple docstring'''
def __init__( self , *args , **kwargs ):
'''simple docstring'''
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' , FutureWarning , )
super().__init__(*args , **kwargs ) | 282 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
torch_layer.weight = nn.Parameter(weight )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ):
# set torch weights for 1-to-1 comparison
np_query_key = np.asarray(weights[0] )
np_value = np.asarray(weights[1] )
np_dense = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ):
# set torch weights for 1-to-1 comparison
np_query = np.asarray(weights[0] )
np_key = np.asarray(weights[1] )
np_value = np.asarray(weights[2] )
np_dense = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch( weights , torch_block , hidden_size ):
# layernorm 1
layer_norm_1 = weights[0][0][0]
layer_norm_1_weight = np.asarray(layer_norm_1[0] )
layer_norm_1_bias = np.asarray(layer_norm_1[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
# lsh weights + output
attn_weights = weights[0][1]
if len(attn_weights ) < 4:
set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
else:
set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
# intermediate weighs
intermediate_weights = weights[2][0][1][2]
# Chunked Feed Forward
if len(intermediate_weights ) == 4:
intermediate_weights = intermediate_weights[2]
# layernorm 2
layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
# intermediate dense
inter_dense_weight = np.asarray(intermediate_weights[1][0] )
inter_dense_bias = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
# intermediate out
out_dense_weight = np.asarray(intermediate_weights[4][0] )
out_dense_bias = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size ):
# reformer model
torch_model_reformer = torch_model.reformer
# word embeds
word_embeddings = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
if isinstance(weights[3] , tuple ):
position_embeddings = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
emb_weights = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
trax_layer_weights = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
trax_layer_weights ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(block_weights , layer , hidden_size )
# output layer norm
layer_norm_out_weight = np.asarray(weights[7][0] )
layer_norm_out_bias = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
# output embeddings
output_embed_weights = np.asarray(weights[9][0] )
output_embed_bias = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ):
# Initialise PyTorch model
config = ReformerConfig.from_json_file(config_file )
print(f'''Building PyTorch model from configuration: {config}''' )
model = ReformerModelWithLMHead(config )
with open(trax_model_pkl_path , 'rb' ) as f:
model_weights = pickle.load(f )['weights']
set_model_weights_in_torch(model_weights , model , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 282 | 1 |
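An example invocation of the conversion entry point above; the flags are the ones declared in the argparse block, while the script filename and paths are illustrative.
# python convert_reformer_trax_checkpoint_to_pytorch.py \
#     --trax_model_pkl_path /path/to/model.pkl \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin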
def a_ ( n : int = 1_000 ) -> int:
product = -1
candidate = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
b = (n * n - 2 * a * n) // (2 * n - 2 * a)
c = n - a - b
if c * c == (a * a + b * b):
candidate = a * b * c
if candidate >= product:
product = candidate
return product
if __name__ == "__main__":
print(F'{solution() = }') | 282 |
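A quick worked check of the function above: for n = 12 the only Pythagorean triplet is (3, 4, 5), since 3 + 4 + 5 = 12 and 9 + 16 = 25, so the product is 60.
assert a_(12) == 60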
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ):
pruning_method = args.pruning_method
threshold = args.threshold
model_name_or_path = args.model_name_or_path.rstrip('/' )
target_model_path = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
model = torch.load(os.path.join(model_name_or_path , 'pytorch_model.bin' ) )
pruned_model = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
pruned_model[name] = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
pruned_model[name] = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
pruned_model[name] = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
pruned_model[name] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
prefix_ = name[:-6]
scores = model[f'''{prefix_}mask_scores''']
mask = TopKBinarizer.apply(scores , threshold )
pruned_model[name] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
prefix_ = name[:-6]
scores = model[f'''{prefix_}mask_scores''']
mask = ThresholdBinarizer.apply(scores , threshold , True )
pruned_model[name] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
prefix_ = name[:-6]
scores = model[f'''{prefix_}mask_scores''']
l , r = -0.1, 1.1
s = torch.sigmoid(scores )
s_bar = s * (r - l) + l
mask = s_bar.clamp(min=0.0 , max=1.0 )
pruned_model[name] = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
target_model_path = os.path.join(
os.path.dirname(model_name_or_path ) , f'''bertarized_{os.path.basename(model_name_or_path )}''' )
if not os.path.isdir(target_model_path ):
shutil.copytree(model_name_or_path , target_model_path )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(pruned_model , os.path.join(target_model_path , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
args = parser.parse_args()
main(args) | 282 | 1 |
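An example invocation of the script above; the flags come from its argparse block, while the script filename and paths are illustrative (in transformers' movement-pruning examples this script is typically bertarize.py).
# python bertarize.py \
#     --pruning_method sigmoied_threshold \
#     --threshold 0.1 \
#     --model_name_or_path /path/to/fine_pruned_model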
class OverFlowError( Exception ):
'''simple docstring'''
pass
class UnderFlowError( Exception ):
'''simple docstring'''
pass
class FixedPriorityQueue:
'''simple docstring'''
def __init__( self : str ):
'''simple docstring'''
_snake_case = [
[],
[],
[],
]
def enqueue( self , priority : int , data : int ):
'''simple docstring'''
try:
if len(self.queues[priority] ) >= 100:
raise OverFlowError('Maximum queue size is 100' )
self.queues[priority].append(data )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def dequeue( self ):
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self : Dict ):
'''simple docstring'''
return "\n".join(f'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
'''simple docstring'''
def __init__( self : List[str] ):
'''simple docstring'''
_snake_case = []
def enqueue( self , data : int ):
'''simple docstring'''
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(data )
def dequeue( self ):
'''simple docstring'''
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
data = min(self.queue )
self.queue.remove(data )
return data
def __str__( self : Optional[int] ):
'''simple docstring'''
return str(self.queue )
def fixed_priority_queue() -> None:
fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue() | 282 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
'''simple docstring'''
@property
def dummy_input( self ):
'''simple docstring'''
return self.get_dummy_input()
@property
def output_shape( self ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def get_dummy_input( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
'''simple docstring'''
batch_size = 4
num_channels = 32
sizes = (32, 32)
generator = torch.manual_seed(0 )
device = torch.device(torch_device )
shape = (batch_size, num_channels) + sizes
hidden_states = randn_tensor(shape , generator=generator , device=device )
dummy_input = {'hidden_states': hidden_states}
if include_temb:
temb_channels = 128
dummy_input['temb'] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
if include_res_hidden_states_tuple:
generator_1 = torch.manual_seed(1 )
dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape , generator=generator_1 , device=device ),)
if include_encoder_hidden_states:
dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32) ).to(torch_device )
if include_skip_sample:
dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
return dummy_input
def prepare_init_args_and_inputs_for_common( self ):
'''simple docstring'''
init_dict = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
init_dict['in_channels'] = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output( self , expected_slice ):
'''simple docstring'''
init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
unet_block = self.block_class(**init_dict )
unet_block.to(torch_device )
unet_block.eval()
with torch.no_grad():
output = unet_block(**inputs_dict )
if isinstance(output , Tuple ):
output = output[0]
self.assertEqual(output.shape , self.output_shape )
output_slice = output[0, -1, -3:, -3:]
expected_slice = torch.tensor(expected_slice ).to(torch_device )
assert torch_all_close(output_slice.flatten() , expected_slice , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def test_training( self ):
'''simple docstring'''
init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.block_class(**init_dict )
model.to(torch_device )
model.train()
output = model(**inputs_dict )
if isinstance(output , Tuple ):
output = output[0]
device = torch.device(torch_device )
noise = randn_tensor(output.shape , device=device )
loss = torch.nn.functional.mse_loss(output , noise )
loss.backward() | 282 | 1 |
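A sketch of how the mixin above is consumed: a concrete test class pins block_class and block_type, then feeds test_output an expected slice (the imported block and the slice values here are illustrative).
import unittest
from diffusers.models.unet_2d_blocks import DownBlock2D

class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D
    block_type = 'down'

    def test_output(self):
        # nine values covering the (3, 3) corner of the last channel, as sliced by the mixin
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)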
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_lowerCamelCase : Any = '''true'''
def get_basic_setup( accelerator , num_samples=82 , batch_size=16 ):
set_seed(42 )
model = RegressionModel()
ddp_model = deepcopy(model )
dset = RegressionDataset(length=num_samples )
dataloader = DataLoader(dset , batch_size=batch_size )
model.to(accelerator.device )
ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
return model, ddp_model, dataloader
def get_dataloader( accelerator : Accelerator , use_longest=False ):
tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
dataset = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(examples ):
outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
return outputs
with accelerator.main_process_first():
tokenized_datasets = dataset.map(
tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(examples ):
if use_longest:
return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup( dispatch_batches , split_batches ):
accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
dataloader = get_dataloader(accelerator , not dispatch_batches )
model = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=True )
ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions( model , dataloader , accelerator ):
logits_and_targets = []
for batch in dataloader:
input , target = batch.values()
with torch.no_grad():
logit = model(input )
logit , target = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
logits , targs = [], []
for logit, targ in logits_and_targets:
logits.append(logit )
targs.append(targ )
logits , targs = torch.cat(logits ), torch.cat(targs )
return logits, targs
def test_torch_metrics( accelerator : Accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
logits , targs = generate_predictions(ddp_model , dataloader , accelerator )
assert (
len(logits ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits )}'''
def test_mrpc( dispatch_batches : bool = False , split_batches : bool = False ):
metric = evaluate.load('glue' , 'mrpc' )
setup , accelerator = get_mrpc_setup(dispatch_batches , split_batches )
# First do baseline
model , dataloader , device = setup['no']
model.to(device )
model.eval()
for batch in dataloader:
batch.to(device )
with torch.inference_mode():
outputs = model(**batch )
preds = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=preds , references=batch['labels'] )
baseline = metric.compute()
# Then do distributed
model , dataloader , device = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
outputs = model(**batch )
preds = outputs.logits.argmax(dim=-1 )
references = batch['labels']
preds , references = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=preds , references=references )
distributed = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
accelerator = Accelerator(split_batches=False , dispatch_batches=False )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(dispatch_batches , split_batches )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(accelerator , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
accelerator = Accelerator()
test_torch_metrics(accelerator , 512 )
accelerator.state._reset_state()
def _mp_fn( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 282 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def a_ ( year : int , month : int , day : int ) -> str:
assert len(str(year ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
century = year // 100
century_anchor = (5 * (century % 4) + 2) % 7
centurian = year % 100
centurian_m = centurian % 12
dooms_day = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
day_anchor = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
week_day = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
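Two hand-checked examples for the converter above (1 January 2000 fell on a Saturday, 1 January 2023 on a Sunday):
assert a_(2000, 1, 1) == 'Saturday'
assert a_(2023, 1, 1) == 'Sunday'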
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig( PretrainedConfig ):
'''simple docstring'''
_UpperCAmelCase : Dict = "bart"
_UpperCAmelCase : int = ["past_key_values"]
_UpperCAmelCase : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , vocab_size=50_265 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , num_labels=3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
'''simple docstring'''
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=num_labels , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
class BartOnnxConfig( OnnxSeq2SeqConfigWithPast ):
'''simple docstring'''
@property
def inputs( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
common_inputs = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
common_inputs['decoder_input_ids'] = {0: 'batch'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(common_inputs , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
common_inputs = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
num_encoder_layers , _ = self.num_layers
for i in range(num_encoder_layers ):
common_inputs[f'''past_key_values.{i}.key'''] = {0: 'batch', 2: 'past_sequence + sequence'}
common_inputs[f'''past_key_values.{i}.value'''] = {0: 'batch', 2: 'past_sequence + sequence'}
else:
common_inputs = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def outputs( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
common_outputs = super().outputs
else:
common_outputs = super(OnnxConfigWithPast , self ).outputs
if self.use_past:
num_encoder_layers , _ = self.num_layers
for i in range(num_encoder_layers ):
common_outputs[f'''present.{i}.key'''] = {0: 'batch', 2: 'past_sequence + sequence'}
common_outputs[f'''present.{i}.value'''] = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _generate_dummy_inputs_for_default_and_seq2seq_lm( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
'''simple docstring'''
encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer , batch_size , seq_length , is_pair , framework )
# Generate decoder inputs
decoder_seq_length = seq_length if not self.use_past else 1
decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
tokenizer , batch_size , decoder_seq_length , is_pair , framework )
decoder_inputs = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
common_inputs = dict(**encoder_inputs , **decoder_inputs )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
batch , encoder_seq_length = common_inputs['input_ids'].shape
decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
encoder_shape = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
decoder_past_length = decoder_seq_length + 3
decoder_shape = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
common_inputs['decoder_attention_mask'] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(batch , decoder_past_length )] , dim=1 )
common_inputs['past_key_values'] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
num_encoder_layers , num_decoder_layers = self.num_layers
min_num_layers = min(num_encoder_layers , num_decoder_layers )
max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(min_num_layers ):
common_inputs["past_key_values"].append(
(
torch.zeros(decoder_shape ),
torch.zeros(decoder_shape ),
torch.zeros(encoder_shape ),
torch.zeros(encoder_shape ),
) )
# TODO: test this.
shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(min_num_layers , max_num_layers ):
common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
return common_inputs
def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
_snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase , lowercase , lowercase , lowercase , lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case , _snake_case = self.num_layers
_snake_case , _snake_case = self.num_attention_heads
_snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case = common_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[common_inputs['attention_mask'], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
_snake_case = [
(torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase )
]
return common_inputs
def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
_snake_case = compute_effective_axis_dimension(
lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_snake_case = tokenizer.num_special_tokens_to_add(lowercase )
_snake_case = compute_effective_axis_dimension(
lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase )
# Generate dummy inputs according to compute batch and sequence
_snake_case = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_snake_case = dict(tokenizer(lowercase , return_tensors=lowercase ) )
return common_inputs
def A ( self : List[Any] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
elif self.task == "causal-lm":
_snake_case = self._generate_dummy_inputs_for_causal_lm(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
else:
_snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
return common_inputs
def A ( self : Tuple , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : int , lowercase : int ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_snake_case = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase )
else:
_snake_case = super(lowercase , self )._flatten_past_key_values_(
lowercase , lowercase , lowercase , lowercase ) | 282 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Union[str, Any] , lowercase : Optional[int]=32 ):
'''simple docstring'''
set_seed(0 )
_snake_case = UNetaDModel(sample_size=lowercase , in_channels=3 , out_channels=3 )
_snake_case = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_snake_case = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
_snake_case = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
_snake_case = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randn((4, 3, 32, 32) ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randint(0 , 1_000 , (4,) ).long().to(lowercase ) for _ in range(4 )]
# train with a DDPM scheduler
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) ) | 282 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : Tuple = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 282 |
import numpy as np
def a_ ( __lowercase : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
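For illustration, a minimal runnable sketch of the sigmoid above applied to a sample vector (the function name and array values are made up for this example, not taken from the file):
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    # Elementwise logistic function 1 / (1 + exp(-x)); squashes reals into (0, 1).
    return 1 / (1 + np.exp(-vector))
print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # approx. [0.26894142 0.5 0.73105858]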
from sklearn.metrics import mean_squared_error
import datasets
_lowerCamelCase : List[Any] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_lowerCamelCase : Optional[int] = '''\
Mean Squared Error (MSE) is the average of the squared differences between the
predicted and actual values.
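Formally, for n samples: MSE = (1/n) * sum((y_i - yhat_i)**2 for i in 1..n).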
'''
_lowerCamelCase : Tuple = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows:
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Any ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def A ( self : Optional[int] ):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def A ( self : Union[str, Any] , lowercase : Dict , lowercase : Dict , lowercase : List[str]=None , lowercase : Tuple="uniform_average" , lowercase : Any=True ):
'''simple docstring'''
_snake_case = mean_squared_error(
lowercase , lowercase , sample_weight=lowercase , multioutput=lowercase , squared=lowercase )
return {"mse": mse} | 282 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : int ):
'''simple docstring'''
_snake_case = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_snake_case = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_snake_case = 'The dog is cute and lives in the garden house'
_snake_case = jnp.array([tokenizer.encode(lowercase )] )
_snake_case = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_snake_case = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
_snake_case = model(lowercase )['last_hidden_state']
self.assertEqual(output.shape , lowercase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , lowercase , atol=1E-3 ) ) | 282 | 1 |
from math import pi, sqrt, tan
def a_ ( __lowercase : float ) -> float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def a_ ( __lowercase : float , __lowercase : float , __lowercase : float ) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def a_ ( __lowercase : float ) -> float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def a_ ( __lowercase : float ) -> float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def a_ ( __lowercase : float , __lowercase : float ) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def a_ ( __lowercase : float , __lowercase : float , __lowercase : float ) -> float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
_snake_case = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def a_ ( __lowercase : float , __lowercase : float ) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def a_ ( __lowercase : float , __lowercase : float ) -> float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(__lowercase , 2 ) * torus_radius * tube_radius
def a_ ( __lowercase : float , __lowercase : float ) -> float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def a_ ( __lowercase : float ) -> float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def a_ ( __lowercase : float , __lowercase : float ) -> float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
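# The three-sided variant below uses Heron's formula: with s = (a + b + c) / 2, area = sqrt(s * (s - a) * (s - b) * (s - c)).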
def a_ ( __lowercase : float , __lowercase : float , __lowercase : float ) -> float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
_snake_case = (sidea + sidea + sidea) / 2
_snake_case = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def a_ ( __lowercase : float , __lowercase : float ) -> float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def a_ ( __lowercase : float , __lowercase : float , __lowercase : float ) -> float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def a_ ( __lowercase : float ) -> float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def a_ ( __lowercase : float , __lowercase : float ) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def a_ ( __lowercase : float , __lowercase : float ) -> float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def a_ ( __lowercase : int , __lowercase : float ) -> float:
if not isinstance(__lowercase , __lowercase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }') | 282 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = '''▁'''
_lowerCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Any = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
_lowerCamelCase : Optional[int] = {
'''google/pegasus-xsum''': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Any = PegasusTokenizer
_UpperCAmelCase : Dict = ["input_ids", "attention_mask"]
def __init__( self : Tuple , lowercase : str=None , lowercase : Any=None , lowercase : List[Any]="<pad>" , lowercase : List[Any]="</s>" , lowercase : Tuple="<unk>" , lowercase : Any="<mask_2>" , lowercase : List[str]="<mask_1>" , lowercase : List[Any]=None , lowercase : Dict=103 , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowercase , lowercase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowercase )}, but is'''
f''' {type(lowercase )}''' )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
            # fill additional tokens with ..., <unk_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowercase ) , self.offset - 1 )
]
if len(set(lowercase ) ) != len(lowercase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowercase , tokenizer_file=lowercase , pad_token=lowercase , eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , **lowercase , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
def A ( self : List[str] , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def A ( self : List[Any] , lowercase : List , lowercase : Optional[List] = None , lowercase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowercase )
elif token_ids_a is None:
return self._special_token_mask(lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def A ( self : Any , lowercase : Tuple , lowercase : Any=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A ( self : int , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,) | 282 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def a_ ( __lowercase : int , __lowercase : List[str] ) -> Tuple:
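    # Remap the LDM VAE state dict onto diffusers' AutoencoderKL layout: copy the conv/norm stems verbatim, then rename the down/mid/up blocks below.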
_snake_case = checkpoint
_snake_case = {}
_snake_case = vae_state_dict['encoder.conv_in.weight']
_snake_case = vae_state_dict['encoder.conv_in.bias']
_snake_case = vae_state_dict['encoder.conv_out.weight']
_snake_case = vae_state_dict['encoder.conv_out.bias']
_snake_case = vae_state_dict['encoder.norm_out.weight']
_snake_case = vae_state_dict['encoder.norm_out.bias']
_snake_case = vae_state_dict['decoder.conv_in.weight']
_snake_case = vae_state_dict['decoder.conv_in.bias']
_snake_case = vae_state_dict['decoder.conv_out.weight']
_snake_case = vae_state_dict['decoder.conv_out.bias']
_snake_case = vae_state_dict['decoder.norm_out.weight']
_snake_case = vae_state_dict['decoder.norm_out.bias']
_snake_case = vae_state_dict['quant_conv.weight']
_snake_case = vae_state_dict['quant_conv.bias']
_snake_case = vae_state_dict['post_quant_conv.weight']
_snake_case = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
_snake_case = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
_snake_case = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(__lowercase )
}
# Retrieves the keys for the decoder up blocks only
_snake_case = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
_snake_case = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(__lowercase )
}
for i in range(__lowercase ):
_snake_case = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_snake_case = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
_snake_case = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
_snake_case = renew_vae_resnet_paths(__lowercase )
_snake_case = {'old': f'''down.{i}.block''', 'new': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
_snake_case = [key for key in vae_state_dict if 'encoder.mid.block' in key]
_snake_case = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_snake_case = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
_snake_case = renew_vae_resnet_paths(__lowercase )
_snake_case = {'old': f'''mid.block_{i}''', 'new': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
_snake_case = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
_snake_case = renew_vae_attention_paths(__lowercase )
_snake_case = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
conv_attn_to_linear(__lowercase )
for i in range(__lowercase ):
_snake_case = num_up_blocks - 1 - i
_snake_case = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_snake_case = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
_snake_case = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
_snake_case = renew_vae_resnet_paths(__lowercase )
_snake_case = {'old': f'''up.{block_id}.block''', 'new': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
_snake_case = [key for key in vae_state_dict if 'decoder.mid.block' in key]
_snake_case = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_snake_case = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
_snake_case = renew_vae_resnet_paths(__lowercase )
_snake_case = {'old': f'''mid.block_{i}''', 'new': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
_snake_case = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
_snake_case = renew_vae_attention_paths(__lowercase )
_snake_case = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase )
conv_attn_to_linear(__lowercase )
return new_checkpoint
def a_ ( __lowercase : str , __lowercase : str , ) -> int:
# Only support V1
_snake_case = requests.get(
' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
_snake_case = io.BytesIO(r.content )
_snake_case = OmegaConf.load(__lowercase )
_snake_case = 512
_snake_case = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
_snake_case = {}
with safe_open(__lowercase , framework='pt' , device='cpu' ) as f:
for key in f.keys():
_snake_case = f.get_tensor(__lowercase )
else:
_snake_case = torch.load(__lowercase , map_location=__lowercase )['state_dict']
# Convert the VAE model.
_snake_case = create_vae_diffusers_config(__lowercase , image_size=__lowercase )
_snake_case = custom_convert_ldm_vae_checkpoint(__lowercase , __lowercase )
_snake_case = AutoencoderKL(**__lowercase )
vae.load_state_dict(__lowercase )
vae.save_pretrained(__lowercase )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
_lowerCamelCase : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path) | 282 |
from collections.abc import Sequence
def a_ ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(__lowercase ) )
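# Horner's scheme below rewrites c0 + c1*x + ... + cn*x**n as c0 + x*(c1 + x*(c2 + ...)), so evaluation needs only n multiplications and n additions.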
def a_ ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
_snake_case = 0.0
for coeff in reversed(__lowercase ):
_snake_case = result * x + coeff
return result
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowerCamelCase : Optional[int] = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x)) | 282 | 1 |
_lowerCamelCase : int = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : List[str] = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def a_ ( __lowercase : int , __lowercase : int , __lowercase : int ) -> str:
assert len(str(__lowercase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
_snake_case = year // 100
_snake_case = (5 * (century % 4) + 2) % 7
_snake_case = year % 100
_snake_case = centurian % 12
_snake_case = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_snake_case = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)  # not a leap year: centuries are leap only when divisible by 400
else DOOMSDAY_LEAP[month - 1]
)
_snake_case = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 |
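A quick hedged spot-check of the weekday routine above, assuming the corrected leap-year condition (the call uses the file's obfuscated function name `a_`, so this is a sketch rather than something runnable against the aliased tables):
print(a_(2023, 6, 15))  # 'Thursday' -- the 2023 doomsday anchor is Tuesday, and June 15 is 9 days past June's doomsday (6/6)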
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : str , lowercase : List[str]=13 , lowercase : Any=7 , lowercase : Dict=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : Any=True , lowercase : Tuple=99 , lowercase : str=24 , lowercase : str=2 , lowercase : Any=6 , lowercase : Dict=37 , lowercase : List[str]="gelu" , lowercase : Dict=0.1 , lowercase : Tuple=0.1 , lowercase : Optional[Any]=512 , lowercase : List[Any]=16 , lowercase : str=2 , lowercase : int=0.02 , lowercase : List[Any]=3 , lowercase : List[Any]=None , lowercase : int=1_000 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = scope
_snake_case = range_bbox
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_snake_case = bbox[i, j, 3]
_snake_case = bbox[i, j, 1]
_snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_snake_case = bbox[i, j, 2]
_snake_case = bbox[i, j, 0]
_snake_case = t
_snake_case = None
if self.use_input_mask:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A ( self : List[str] ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A ( self : str , lowercase : Tuple , lowercase : Tuple , lowercase : str , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : str , ):
'''simple docstring'''
_snake_case = LiltModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : List[Any] , lowercase : int , lowercase : int , lowercase : Any , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Optional[int] , ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = LiltForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] , lowercase : int , lowercase : int , ):
'''simple docstring'''
_snake_case = LiltForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
        (
            _snake_case,
            _snake_case,
            _snake_case,
            _snake_case,
            _snake_case,
            _snake_case,
            _snake_case,
        ) = config_and_inputs
_snake_case = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : List[str] = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Union[str, Any] = False
def A ( self : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple ):
'''simple docstring'''
return True
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = LiltModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case = type
self.model_tester.create_and_check_model(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = LiltModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase )
_snake_case = torch.tensor([[1, 2]] , device=lowercase )
_snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(input_ids=lowercase , bbox=lowercase )
_snake_case = torch.Size([1, 2, 768] )
_snake_case = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowercase , )
self.assertTrue(outputs.last_hidden_state.shape , lowercase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase , atol=1E-3 ) ) | 282 | 1 |
from itertools import count
def a_ ( __lowercase : int = 50 ) -> int:
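    # fill_count_functions[n] counts ways to tile a row of length n with blocks of at least min_block_length cells separated by gaps (the Project Euler 115 fill-count function); return the first n whose count tops one million.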
_snake_case = [1] * min_block_length
for n in count(__lowercase ):
fill_count_functions.append(1 )
for block_length in range(__lowercase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_000_000:
break
return n
if __name__ == "__main__":
print(F'{solution() = }') | 282 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def a_ ( __lowercase : Sequence[float] , __lowercase : int , __lowercase : int ) -> tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
_snake_case = (low + high) // 2
_snake_case , _snake_case , _snake_case = max_subarray(__lowercase , __lowercase , __lowercase )
_snake_case , _snake_case , _snake_case = max_subarray(__lowercase , mid + 1 , __lowercase )
_snake_case , _snake_case , _snake_case = max_cross_sum(__lowercase , __lowercase , __lowercase , __lowercase )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def a_ ( __lowercase : Sequence[float] , __lowercase : int , __lowercase : int , __lowercase : int ) -> tuple[int, int, float]:
_snake_case , _snake_case = float('-inf' ), -1
_snake_case , _snake_case = float('-inf' ), -1
_snake_case = 0
for i in range(__lowercase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_snake_case = summ
_snake_case = i
_snake_case = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_snake_case = summ
_snake_case = i
return max_left, max_right, (left_sum + right_sum)
def a_ ( __lowercase : int ) -> float:
_snake_case = [randint(1 , __lowercase ) for _ in range(__lowercase )]
_snake_case = time.time()
max_subarray(__lowercase , 0 , input_size - 1 )
_snake_case = time.time()
return end - start
def a_ ( ) -> None:
_snake_case = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
_snake_case = [time_max_subarray(__lowercase ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(__lowercase , __lowercase ):
print(__lowercase , '\t\t' , __lowercase )
plt.plot(__lowercase , __lowercase )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod() | 282 | 1 |
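For orientation, a self-contained sketch of the same maximum-subarray problem in its classic linear-time (Kadane) form -- the function name and sample array are illustrative, not from this file:
def kadane(arr: list[float]) -> float:
    # Track the best sum ending at the current index and the best sum seen overall.
    best = current = arr[0]
    for x in arr[1:]:
        current = max(x, current + x)
        best = max(best, current)
    return best
print(kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6  (subarray [4, -1, 2, 1])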
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = CanineTokenizer
_UpperCAmelCase : Dict = False
def A ( self : Tuple ):
'''simple docstring'''
super().setUp()
_snake_case = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A ( self : Optional[Any] ):
'''simple docstring'''
return CanineTokenizer.from_pretrained('google/canine-s' )
def A ( self : List[str] , **lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
_snake_case = 1_024
return tokenizer
@require_torch
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.canine_tokenizer
_snake_case = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
# fmt: off
_snake_case = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0]
# fmt: on
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors='pt' )
self.assertIsInstance(lowercase , lowercase )
_snake_case = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.canine_tokenizer
_snake_case = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
_snake_case = tokenizer(lowercase , padding=lowercase , return_tensors='pt' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertIn('token_type_ids' , lowercase )
@require_torch
def A ( self : int ):
'''simple docstring'''
_snake_case = self.canine_tokenizer
_snake_case = [
            'What\'s the weather?',
'It\'s about 25 degrees.',
]
_snake_case = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_snake_case = chr(0xE007 )
additional_special_tokens.append(lowercase )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_snake_case = tokenizer.__class__.from_pretrained(lowercase )
_snake_case = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn(lowercase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : int ):
'''simple docstring'''
_snake_case = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case , _snake_case = self.get_clean_sequence(lowercase )
# a special token for Canine can be defined as follows:
_snake_case = 0xE005
_snake_case = chr(lowercase )
tokenizer.add_special_tokens({'cls_token': special_token} )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertEqual(len(lowercase ) , 1 )
_snake_case = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowercase )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertEqual(lowercase , input_encoded + special_token_id )
_snake_case = tokenizer.decode(lowercase , skip_special_tokens=lowercase )
self.assertTrue(special_token not in decoded )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = chr(0xE005 )
_snake_case = chr(0xE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowercase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
_snake_case = tokenizer.tokenize(lowercase )
_snake_case = tokenizer.tokenize(lowercase )
self.assertEqual(len(lowercase ) , 1 )
self.assertEqual(len(lowercase ) , 1 )
self.assertEqual(token_a[0] , lowercase )
self.assertEqual(token_a[0] , lowercase )
@require_tokenizers
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
_snake_case = 0xE006
_snake_case = chr(lowercase )
_snake_case = AddedToken(lowercase , lstrip=lowercase )
tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowercase )
tokenizer.from_pretrained(lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowercase )
# a special token for Canine can be defined as follows:
_snake_case = 0xE006
_snake_case = chr(lowercase )
_snake_case = [new_token_a]
_snake_case = [new_token_a]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(lowercase , extra_ids=0 )
self.assertIn(lowercase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
_snake_case = 0xE007
_snake_case = chr(lowercase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = [AddedToken(lowercase , lstrip=lowercase )]
_snake_case = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , extra_ids=0 )
self.assertIn(lowercase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = 'hello world'
if self.space_between_special_tokens:
_snake_case = '[CLS] hello world [SEP]'
else:
_snake_case = input
_snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
_snake_case = tokenizer.decode(lowercase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowercase , [output, output.lower()] )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_snake_case = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_snake_case = 'a'
_snake_case = ord(lowercase )
for attr in attributes_list:
setattr(lowercase , attr + '_id' , lowercase )
self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
self.assertEqual(getattr(lowercase , attr + '_id' ) , lowercase )
setattr(lowercase , attr + '_id' , lowercase )
self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
self.assertEqual(getattr(lowercase , attr + '_id' ) , lowercase )
setattr(lowercase , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(lowercase , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(lowercase , 'additional_special_tokens_ids' ) , [] )
_snake_case = 0xE006
_snake_case = chr(lowercase )
setattr(lowercase , 'additional_special_tokens_ids' , [additional_special_token_id] )
self.assertListEqual(getattr(lowercase , 'additional_special_tokens' ) , [additional_special_token] )
self.assertListEqual(getattr(lowercase , 'additional_special_tokens_ids' ) , [additional_special_token_id] )
def A ( self : str ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : Optional[Any] ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Tuple ):
'''simple docstring'''
pass | 282 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] , lowercase : Dict ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
_snake_case = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(lowercase )
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Any ):
'''simple docstring'''
_snake_case = 'sgugger/tiny-distilbert-classification'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , only_pretrain_model=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , torchscript=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , fpaa=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
# set architectures equal to `None`
_snake_case = None
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tinier_bart'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = 'sshleifer/tinier_bart'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , save_to_csv=lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowercase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowercase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowercase , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowercase , 'env.csv' ) , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'env.csv' ) ).exists() )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowercase : Optional[Any] ):
self.assertTrue(hasattr(lowercase , 'sequential' ) )
self.assertTrue(hasattr(lowercase , 'cumulative' ) )
self.assertTrue(hasattr(lowercase , 'current' ) )
self.assertTrue(hasattr(lowercase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase , 'log.txt' ) , log_print=lowercase , trace_memory_line_by_line=lowercase , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase , 'log.txt' ) ).exists() ) | 282 | 1 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order-specific notes:
# - tqdm must be checked before tokenizers
_lowerCamelCase : Optional[Any] = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def a_ ( __lowercase : List[Any] , __lowercase : Union[str, Any]=None ) -> Optional[Any]:
require_version(deps[pkg] , __lowercase ) | 282 |
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase : int , lowercase : int , lowercase : float = 0 ):
'''simple docstring'''
_snake_case , _snake_case = row, column
_snake_case = [[default_value for c in range(lowercase )] for r in range(lowercase )]
def __str__( self : int ):
'''simple docstring'''
_snake_case = f'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Find the widest element so the string format identifier can use a fixed column width
_snake_case = 0
for row_vector in self.array:
for obj in row_vector:
_snake_case = max(lowercase , len(str(lowercase ) ) )
_snake_case = f'''%{max_element_length}s'''
# Make string and return
def single_line(lowercase : list[float] ) -> str:
nonlocal string_format_identifier
_snake_case = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowercase ) for row_vector in self.array )
return s
def __repr__( self : Dict ):
'''simple docstring'''
return str(self )
def A ( self : str , lowercase : tuple[int, int] ):
'''simple docstring'''
if not (isinstance(lowercase , (list, tuple) ) and len(lowercase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Dict , lowercase : tuple[int, int] ):
'''simple docstring'''
assert self.validate_indicies(lowercase )
return self.array[loc[0]][loc[1]]
def __setitem__( self : str , lowercase : tuple[int, int] , lowercase : float ):
'''simple docstring'''
assert self.validate_indicies(lowercase )
_snake_case = value
def __add__( self : str , lowercase : Matrix ):
'''simple docstring'''
assert isinstance(lowercase , lowercase )
assert self.row == another.row and self.column == another.column
# Add
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ):
'''simple docstring'''
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = -self[r, c]
return result
def __sub__( self : List[str] , lowercase : Matrix ):
'''simple docstring'''
return self + (-another)
def __mul__( self : Dict , lowercase : int | float | Matrix ):
'''simple docstring'''
if isinstance(lowercase , (int, float) ): # Scalar multiplication
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c] * another
return result
elif isinstance(lowercase , lowercase ): # Matrix multiplication
assert self.column == another.row
_snake_case = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_snake_case = f'''Unsupported type given for another ({type(lowercase )})'''
raise TypeError(lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c]
return result
def A ( self : List[Any] , lowercase : Matrix , lowercase : Matrix ):
'''simple docstring'''
assert isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
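# Sherman-Morrison: with self holding A^(-1), (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u)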
_snake_case = v.transpose()
_snake_case = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def a_ ( ) -> None:
# a^(-1)
_snake_case = Matrix(3 , 3 , 0 )
for i in range(3 ):
_snake_case = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
_snake_case = Matrix(3 , 1 , 0 )
_snake_case , _snake_case , _snake_case = 1, 2, -3
_snake_case = Matrix(3 , 1 , 0 )
_snake_case , _snake_case , _snake_case = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(__lowercase , __lowercase )}''' )
def a_ ( ) -> None:
import doctest
doctest.testmod()
testa() | 282 | 1 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def a_ ( __lowercase : List[str] ) -> str:
_snake_case = []
for line in lines:
_snake_case = re.sub(r'#.*' , '' , __lowercase ) # remove comments
if line:
filtered_lines.append(__lowercase )
_snake_case = '\n'.join(__lowercase )
# Make a hash from all this code
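# comments and blank lines were stripped above, so they can never invalidate the cache key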
_snake_case = full_str.encode('utf-8' )
return shaaaa(__lowercase ).hexdigest()
# get importable module names and hash for caching
_lowerCamelCase : List[Any] = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_lowerCamelCase : str = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_lowerCamelCase : int = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
_lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''') | 282 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , *lowercase : Optional[int] , **lowercase : Any ):
'''simple docstring'''
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase ) | 282 | 1 |
def a_ ( __lowercase : list ) -> list:
if len(__lowercase ) <= 1:
return [tuple(__lowercase )]
_snake_case = []
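# Heap's algorithm: recursively fix the last position, swapping it with index i (k even)
# or index 0 (k odd) between recursive calls; a length-3 input yields all 3! = 6 orderings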
def generate(__lowercase : int , __lowercase : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , __lowercase )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_snake_case , _snake_case = arr[k - 1], arr[i]
else: # k is odd
_snake_case , _snake_case = arr[k - 1], arr[0]
generate(k - 1 , __lowercase )
generate(len(__lowercase ) , __lowercase )
return res
if __name__ == "__main__":
_lowerCamelCase : int = input('''Enter numbers separated by a comma:\n''').strip()
_lowerCamelCase : Any = [int(item) for item in user_input.split(''',''')]
print(heaps(arr)) | 282 |
def a_ ( __lowercase : str ) -> int:
_snake_case = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
_snake_case = hex_num[0] == '-'
if is_negative:
_snake_case = hex_num[1:]
try:
_snake_case = int(__lowercase , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
_snake_case = ''
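# peel off the least-significant bit each pass and prepend it, e.g. 'AC' -> 172 -> '10101100'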
while int_num > 0:
_snake_case = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = "decision_transformer"
_UpperCAmelCase : Any = ["past_key_values"]
_UpperCAmelCase : Dict = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[int] , lowercase : Optional[Any]=17 , lowercase : str=4 , lowercase : Dict=128 , lowercase : int=4_096 , lowercase : List[Any]=True , lowercase : Dict=1 , lowercase : Union[str, Any]=1_024 , lowercase : Union[str, Any]=3 , lowercase : Any=1 , lowercase : List[Any]=None , lowercase : Dict="relu" , lowercase : Optional[Any]=0.1 , lowercase : str=0.1 , lowercase : List[str]=0.1 , lowercase : List[Any]=1E-5 , lowercase : List[Any]=0.02 , lowercase : Tuple=True , lowercase : List[str]=True , lowercase : int=50_256 , lowercase : int=50_256 , lowercase : Tuple=False , lowercase : Any=False , **lowercase : List[Any] , ):
'''simple docstring'''
_snake_case = state_dim
_snake_case = act_dim
_snake_case = hidden_size
_snake_case = max_ep_len
_snake_case = action_tanh
_snake_case = vocab_size
_snake_case = n_positions
_snake_case = n_layer
_snake_case = n_head
_snake_case = n_inner
_snake_case = activation_function
_snake_case = resid_pdrop
_snake_case = embd_pdrop
_snake_case = attn_pdrop
_snake_case = layer_norm_epsilon
_snake_case = initializer_range
_snake_case = scale_attn_weights
_snake_case = use_cache
_snake_case = scale_attn_by_inverse_layer_idx
_snake_case = reorder_and_upcast_attn
_snake_case = bos_token_id
_snake_case = eos_token_id
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) | 282 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "longformer"
def __init__( self : Optional[Any] , lowercase : Union[List[int], int] = 512 , lowercase : int = 2 , lowercase : int = 1 , lowercase : int = 0 , lowercase : int = 2 , lowercase : int = 30_522 , lowercase : int = 768 , lowercase : int = 12 , lowercase : int = 12 , lowercase : int = 3_072 , lowercase : str = "gelu" , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : int = 512 , lowercase : int = 2 , lowercase : float = 0.02 , lowercase : float = 1E-12 , lowercase : bool = False , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase , **lowercase )
_snake_case = attention_window
_snake_case = sep_token_id
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = onnx_export
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : int , lowercase : "PretrainedConfig" , lowercase : str = "default" , lowercase : "List[PatchingSpec]" = None ):
'''simple docstring'''
super().__init__(lowercase , lowercase , lowercase )
_snake_case = True
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def A ( self : int ):
'''simple docstring'''
_snake_case = super().outputs
if self.task == "default":
_snake_case = {0: 'batch'}
return outputs
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[str] ):
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def A ( self : str , lowercase : "PreTrainedTokenizerBase" , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
_snake_case = super().generate_dummy_inputs(
preprocessor=lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
import torch
# for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_snake_case = torch.zeros_like(inputs['input_ids'] )
# make every second token global
_snake_case = 1
return inputs | 282 | 1 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'''artists_file''': '''artists.json''',
'''lyrics_file''': '''lyrics.json''',
'''genres_file''': '''genres.json''',
}
_lowerCamelCase : Any = {
'''artists_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
},
'''genres_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
},
'''lyrics_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
},
}
_lowerCamelCase : Optional[Any] = {
'''jukebox''': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
_UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : int = PRETRAINED_LYRIC_TOKENS_SIZES
_UpperCAmelCase : Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowercase : Dict , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : int=["v3", "v2", "v2"] , lowercase : List[Any]=512 , lowercase : Any=5 , lowercase : Any="<|endoftext|>" , **lowercase : Tuple , ):
'''simple docstring'''
_snake_case = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else unk_token
super().__init__(
unk_token=lowercase , n_genres=lowercase , version=lowercase , max_n_lyric_tokens=lowercase , **lowercase , )
_snake_case = version
_snake_case = max_n_lyric_tokens
_snake_case = n_genres
with open(lowercase , encoding='utf-8' ) as vocab_handle:
_snake_case = json.load(lowercase )
with open(lowercase , encoding='utf-8' ) as vocab_handle:
_snake_case = json.load(lowercase )
with open(lowercase , encoding='utf-8' ) as vocab_handle:
_snake_case = json.load(lowercase )
_snake_case = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2 the vocab had n_vocab=80 characters; v3 dropped '+', so n_vocab=79.
if len(self.lyrics_encoder ) == 79:
_snake_case = oov.replace(R'\-\'' , R'\-+\'' )
_snake_case = regex.compile(lowercase )
_snake_case = {v: k for k, v in self.artists_encoder.items()}
_snake_case = {v: k for k, v in self.genres_encoder.items()}
_snake_case = {v: k for k, v in self.lyrics_encoder.items()}
@property
def A ( self : Optional[int] ):
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def A ( self : Tuple ):
'''simple docstring'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def A ( self : Optional[int] , lowercase : List[Any] , lowercase : List[str] , lowercase : str ):
'''simple docstring'''
_snake_case = [self.artists_encoder.get(lowercase , 0 ) for artist in list_artists]
for genres in range(len(lowercase ) ):
_snake_case = [self.genres_encoder.get(lowercase , 0 ) for genre in list_genres[genres]]
_snake_case = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
_snake_case = [[self.lyrics_encoder.get(lowercase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def A ( self : List[Any] , lowercase : List[str] ):
'''simple docstring'''
return list(lowercase )
def A ( self : Tuple , lowercase : Tuple , lowercase : Dict , lowercase : Optional[int] , **lowercase : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self.prepare_for_tokenization(lowercase , lowercase , lowercase )
_snake_case = self._tokenize(lowercase )
return artist, genre, lyrics
def A ( self : Any , lowercase : str , lowercase : str , lowercase : str , lowercase : bool = False ):
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
_snake_case = artists[idx].lower()
_snake_case = [genres[idx].lower()]
else:
_snake_case = self._normalize(artists[idx] ) + '.v2'
_snake_case = [
self._normalize(lowercase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
_snake_case = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
_snake_case = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
_snake_case = {vocab[index]: index + 1 for index in range(len(lowercase ) )}
_snake_case = 0
_snake_case = len(lowercase ) + 1
_snake_case = self.vocab
_snake_case = {v: k for k, v in self.vocab.items()}
_snake_case = ''
else:
_snake_case = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
_snake_case = self._run_strip_accents(lowercase )
_snake_case = lyrics.replace('\\' , '\n' )
_snake_case = self.out_of_vocab.sub('' , lowercase ), [], []
return artists, genres, lyrics
def A ( self : Union[str, Any] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = unicodedata.normalize('NFD' , lowercase )
_snake_case = []
for char in text:
_snake_case = unicodedata.category(lowercase )
if cat == "Mn":
continue
output.append(lowercase )
return "".join(lowercase )
def A ( self : Tuple , lowercase : str ):
'''simple docstring'''
_snake_case = (
[chr(lowercase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(lowercase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(lowercase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
_snake_case = frozenset(lowercase )
_snake_case = re.compile(R'_+' )
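# lowercase the text, map any character outside [a-z0-9.] to '_', then collapse runs of '_' and trim them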
_snake_case = ''.join([c if c in accepted else '_' for c in text.lower()] )
_snake_case = pattern.sub('_' , lowercase ).strip('_' )
return text
def A ( self : Any , lowercase : List[str] ):
'''simple docstring'''
return " ".join(lowercase )
def A ( self : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Union[str, TensorType]] = None , lowercase : bool = False ):
'''simple docstring'''
if not isinstance(lowercase , lowercase ):
_snake_case = TensorType(lowercase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
_snake_case = tf.constant
_snake_case = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
_snake_case = torch.tensor
_snake_case = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
_snake_case = jnp.array
_snake_case = _is_jax
else:
_snake_case = np.asarray
_snake_case = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
_snake_case = [inputs]
if not is_tensor(lowercase ):
_snake_case = as_tensor(lowercase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Optional[Any] , lowercase : Optional[int] , lowercase : Tuple , lowercase : Tuple="" , lowercase : Any="pt" ):
'''simple docstring'''
_snake_case = [0, 0, 0]
_snake_case = [artist] * len(self.version )
_snake_case = [genres] * len(self.version )
_snake_case , _snake_case , _snake_case = self.tokenize(lowercase , lowercase , lowercase )
_snake_case , _snake_case , _snake_case = self._convert_token_to_id(lowercase , lowercase , lowercase )
_snake_case = [-INFINITY] * len(full_tokens[-1] )
_snake_case = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=lowercase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def A ( self : Dict , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=lowercase ) )
_snake_case = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=lowercase ) )
_snake_case = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=lowercase ) )
return (artists_file, genres_file, lyrics_file)
def A ( self : Optional[Any] , lowercase : str , lowercase : List[str] , lowercase : Any ):
'''simple docstring'''
_snake_case = self.artists_decoder.get(lowercase )
_snake_case = [self.genres_decoder.get(lowercase ) for genre in genres_index]
_snake_case = [self.lyrics_decoder.get(lowercase ) for character in lyric_index]
return artist, genres, lyrics | 282 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , lowercase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(lowercase )
def A ( self : Optional[int] , lowercase : torch.FloatTensor , lowercase : Union[torch.Tensor, float, int] , lowercase : torch.Tensor , lowercase : List[torch.tensor] , lowercase : List[float] , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[Dict[str, Any]] = None , lowercase : bool = False , lowercase : bool = True , ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowercase , lowercase , self.nets ) ):
_snake_case , _snake_case = controlnet(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# merge samples
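# the first controlnet initializes the residuals; later ones are added elementwise so conditionings compose additively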
if i == 0:
_snake_case , _snake_case = down_samples, mid_sample
else:
_snake_case = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase , lowercase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def A ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = True , lowercase : Callable = None , lowercase : bool = False , lowercase : Optional[str] = None , ):
'''simple docstring'''
_snake_case = 0
_snake_case = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase , is_main_process=lowercase , save_function=lowercase , safe_serialization=lowercase , variant=lowercase , )
idx += 1
_snake_case = model_path_to_save + f'''_{idx}'''
@classmethod
def A ( cls : Any , lowercase : Optional[Union[str, os.PathLike]] , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = 0
_snake_case = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case = pretrained_model_path
while os.path.isdir(lowercase ):
_snake_case = ControlNetModel.from_pretrained(lowercase , **lowercase )
controlnets.append(lowercase )
idx += 1
_snake_case = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(lowercase )} controlnets loaded from {pretrained_model_path}.''' )
if len(lowercase ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(lowercase )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(lowercase ) | 282 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( __lowercase : Dict , __lowercase : int , __lowercase : Optional[Any]=None ) -> Any:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
_snake_case = nn.Parameter(__lowercase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
_snake_case = nn.Parameter(__lowercase )
def a_ ( __lowercase : Any , __lowercase : Dict , __lowercase : Union[str, Any] ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
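# the trax arrays appear to hold one block per attention head; the transpose + view below seems
# to flatten the head dimension into torch's 2-D weight layout (a reading of the reshapes, not documented upstream)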
set_param(
torch_layer.self_attention.query_key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : Any ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
_snake_case = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : Dict , __lowercase : List[str] , __lowercase : Union[str, Any] ) -> Optional[Any]:
# layernorm 1
_snake_case = weights[0][0][0]
_snake_case = np.asarray(layer_norm_a[0] )
_snake_case = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# lsh weights + output
_snake_case = weights[0][1]
if len(__lowercase ) < 4:
set_layer_weights_in_torch_lsh(__lowercase , torch_block.attention , __lowercase )
else:
set_layer_weights_in_torch_local(__lowercase , torch_block.attention , __lowercase )
# intermediate weights
_snake_case = weights[2][0][1][2]
# Chunked Feed Forward
if len(__lowercase ) == 4:
_snake_case = intermediate_weights[2]
# layernorm 2
_snake_case = np.asarray(intermediate_weights[0][0] )
_snake_case = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# intermediate dense
_snake_case = np.asarray(intermediate_weights[1][0] )
_snake_case = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
# intermediate out
_snake_case = np.asarray(intermediate_weights[4][0] )
_snake_case = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Tuple , __lowercase : Tuple , __lowercase : Dict ) -> Optional[int]:
# reformer model
_snake_case = torch_model.reformer
# word embeds
_snake_case = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowercase ) , )
if isinstance(weights[3] , __lowercase ):
_snake_case = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_snake_case = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
_snake_case = nn.Parameter(torch.tensor(__lowercase ) )
_snake_case = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__lowercase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_snake_case = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__lowercase , __lowercase , __lowercase )
# output layer norm
_snake_case = np.asarray(weights[7][0] )
_snake_case = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# output embeddings
_snake_case = np.asarray(weights[9][0] )
_snake_case = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[Any] ) -> Optional[int]:
# Initialise PyTorch model
_snake_case = ReformerConfig.from_json_file(__lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
_snake_case = ReformerModelWithLMHead(__lowercase )
with open(__lowercase , 'rb' ) as f:
_snake_case = pickle.load(__lowercase )['weights']
set_model_weights_in_torch(__lowercase , __lowercase , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 282 |
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : List[str] , lowercase : list[int] ):
'''simple docstring'''
_snake_case = len(lowercase )
_snake_case = [0] * len_array
if len_array > 0:
_snake_case = array[0]
for i in range(1 , lowercase ):
_snake_case = self.prefix_sum[i - 1] + array[i]
def A ( self : Optional[Any] , lowercase : int , lowercase : int ):
'''simple docstring'''
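# prefix_sum[i] holds array[0] + ... + array[i], so any range sum is a single subtraction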
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def A ( self : Union[str, Any] , lowercase : int ):
'''simple docstring'''
_snake_case = {0}
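# a subarray summing to target_sum exists iff some prefix sum minus an earlier prefix sum (or 0) equals target_sum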
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowercase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
from math import factorial
def a_ ( __lowercase : int , __lowercase : int ) -> int:
# If either condition is true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('Please enter positive integers for n and k where n >= k' )
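# C(n, k) = n! / (k! * (n - k)!); for example C(52, 5) = 2,598,960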
return factorial(__lowercase ) // (factorial(__lowercase ) * factorial(n - k ))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
F'fifty-two card deck is: {combinations(52, 5)}\n',
)
print(
'''If a class of 40 students must be arranged into groups of''',
F'4 for group projects, there are {combinations(40, 4)} ways',
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
F'are {combinations(10, 3)} ways that first, second and',
'''third place can be awarded.''',
) | 282 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int = 16 , lowercase : int = 88 , lowercase : Optional[int] = None , lowercase : int = 1 , lowercase : float = 0.0 , lowercase : int = 32 , lowercase : Optional[int] = None , lowercase : bool = False , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : str = "geglu" , lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase , attention_head_dim=lowercase , in_channels=lowercase , num_layers=lowercase , dropout=lowercase , norm_num_groups=lowercase , cross_attention_dim=lowercase , attention_bias=lowercase , sample_size=lowercase , num_vector_embeds=lowercase , activation_fn=lowercase , num_embeds_ada_norm=lowercase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_snake_case = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_snake_case = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_snake_case = [1, 0]
def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : List[str]=None , lowercase : Tuple=None , lowercase : Dict=None , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = hidden_states
_snake_case = []
_snake_case = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_snake_case = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_snake_case = self.transformer_index_for_condition[i]
_snake_case = self.transformers[transformer_index](
lowercase , encoder_hidden_states=lowercase , timestep=lowercase , cross_attention_kwargs=lowercase , return_dict=lowercase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
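# blend the two transformers' residual outputs by mix_ratio, then add the original input back (residual connection)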
_snake_case = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_snake_case = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase ) | 282 | 1 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@property
def A ( self : List[str] ):
'''simple docstring'''
return self.get_dummy_input()
@property
def A ( self : Any ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def A ( self : Union[str, Any] , lowercase : Any=True , lowercase : List[Any]=False , lowercase : List[str]=False , lowercase : Dict=False , ):
'''simple docstring'''
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowercase )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowercase , generator=lowercase , device=lowercase )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowercase , device=lowercase )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowercase , generator=lowercase , device=lowercase ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowercase )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowercase , device=lowercase )
return dummy_input
def A ( self : Any ):
'''simple docstring'''
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Dict , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
unet_block.to(lowercase )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowercase ).to(lowercase )
assert torch_all_close(output_slice.flatten() , lowercase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def A ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
model.to(lowercase )
model.train()
_snake_case = model(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
_snake_case = torch.device(lowercase )
_snake_case = randn_tensor(output.shape , device=lowercase )
_snake_case = torch.nn.functional.mse_loss(lowercase , lowercase )
loss.backward() | 282 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoTokenizer.from_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = tokenizer('This is me' , return_tensors='pt' )
_snake_case = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_snake_case = model.generate(**lowercase )
_snake_case = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_snake_case = model_reloaded.generate(**lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase ) )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
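# saving while the model is still in BetterTransformer form is expected to raise; it must be reversed first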
with self.assertRaises(lowercase ):
model.save_pretrained(lowercase )
_snake_case = model.reverse_bettertransformer()
model.save_pretrained(lowercase ) | 282 | 1 |
from __future__ import annotations
def a_ ( __lowercase : Any , __lowercase : str , __lowercase : Tuple , __lowercase : Optional[int] ) -> Optional[int]: # noqa: E741
while r - l > 1:
_snake_case = (l + r) // 2
if v[m] >= key:
_snake_case = m
else:
_snake_case = m # noqa: E741
return r
def a_ ( __lowercase : list[int] ) -> int:
if len(__lowercase ) == 0:
return 0
_snake_case = [0] * len(__lowercase )
_snake_case = 1
_snake_case = v[0]
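# tail[j] is the smallest possible tail of an increasing subsequence of length j + 1;
# binary search keeps each update O(log n), e.g. [10, 9, 2, 5, 3, 7, 101, 18] -> 4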
for i in range(1 , len(__lowercase ) ):
if v[i] < tail[0]:
_snake_case = v[i]
elif v[i] > tail[length - 1]:
_snake_case = v[i]
length += 1
else:
_snake_case = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCamelCase : List[Any] = HfApi()
_lowerCamelCase : Dict = {}
# fmt: off
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
_lowerCamelCase : List[str] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCamelCase : Any = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
_lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
_lowerCamelCase : int = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCamelCase : Union[str, Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCamelCase : int = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCamelCase : int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'{mod.modelId} has passed successfully!!!') | 282 | 1 |
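# A minimal, self-contained sketch of the key transformation used in the
# lookup above: a Hub repo id such as "CompVis/ldm-celebahq-256" is flattened
# into an identifier-safe key (the repo id below is just an illustrative value).
model_id = "CompVis/ldm-celebahq-256"
flat_key = "_".join("_".join(model_id.split("/")).split("-"))
assert flat_key == "CompVis_ldm_celebahq_256"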
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : str = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 282 |
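# A minimal, runnable sketch of the lazy-import pattern this init file relies
# on (illustrative only; the real transformers._LazyModule does more work):
# attribute access triggers the underlying import, so importing the package
# itself stays cheap.
import importlib
import types
class TinyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module
    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)
_lazy_demo = TinyLazyModule('lazy_demo', {'sqrt': 'math', 'OrderedDict': 'collections'})
print(_lazy_demo.sqrt(16))  # "math" is imported only on this first access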
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase , 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowercase , 'depth_multiplier' ) )
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : List[str] , lowercase : Dict=13 , lowercase : Optional[int]=3 , lowercase : Any=32 , lowercase : Any=0.25 , lowercase : Union[str, Any]=8 , lowercase : List[Any]=8 , lowercase : List[Any]=6 , lowercase : Dict=32 , lowercase : Dict=True , lowercase : Optional[Any]=True , lowercase : Tuple=True , lowercase : Tuple="relu6" , lowercase : List[Any]=1_280 , lowercase : Optional[Any]=0.1 , lowercase : int=0.02 , lowercase : Optional[Any]=True , lowercase : List[str]=True , lowercase : List[str]=10 , lowercase : Optional[Any]=None , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = depth_multiplier
_snake_case = depth_divisible_by
_snake_case = min_depth
_snake_case = expand_ratio
_snake_case = tf_padding
_snake_case = output_stride
_snake_case = first_layer_is_expansion
_snake_case = finegrained_output
_snake_case = hidden_act
_snake_case = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
_snake_case = classifier_dropout_prob
_snake_case = use_labels
_snake_case = is_training
_snake_case = num_labels
_snake_case = initializer_range
_snake_case = scope
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def A ( self : str ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A ( self : Optional[Any] , lowercase : str , lowercase : List[str] , lowercase : str , lowercase : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def A ( self : List[Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForImageClassification(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , lowercase : int , lowercase : Dict , lowercase : int , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForSemanticSegmentation(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A ( self : str ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : str = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase : str = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Union[str, Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = MobileNetVaModelTester(self )
_snake_case = MobileNetVaConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def A ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def A ( self : Any ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : str ):
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.hidden_states
_snake_case = 16
self.assertEqual(len(lowercase ) , lowercase )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase )
@slow
def A ( self : List[Any] ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = MobileNetVaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def a_ ( ) -> Union[str, Any]:
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A ( self : Optional[Any] ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(lowercase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
# verify the logits
_snake_case = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowercase )
_snake_case = torch.tensor([0.2445, -1.1993, 0.1905] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
@slow
def A ( self : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = model.to(lowercase )
_snake_case = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
_snake_case = outputs.logits
# verify the logits
_snake_case = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , lowercase )
_snake_case = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase , atol=1E-4 ) ) | 282 | 1 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
_lowerCamelCase : List[str] = datasets.load_iris()
_lowerCamelCase : List[Any] = np.array(data['''data'''])
_lowerCamelCase : Tuple = np.array(data['''target'''])
_lowerCamelCase : List[Any] = data['''target_names''']
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = train_test_split(X, y)
def a_ ( __lowercase : Tuple , __lowercase : List[str] ) -> Union[str, Any]:
return np.linalg.norm(np.array(__lowercase ) - np.array(__lowercase ) )
def a_ ( __lowercase : str , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : str , __lowercase : str=5 ) -> str:
_snake_case = zip(__lowercase , __lowercase )
# List of distances of all points from the point to be classified
_snake_case = []
for data_point in data:
_snake_case = euclidean_distance(data_point[0] , __lowercase )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
_snake_case = [i[1] for i in sorted(__lowercase )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
_snake_case = Counter(__lowercase ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 282 |
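# A tiny standalone check of the voting step above: Counter.most_common(1)
# returns [(label, count)] for the modal label, hence the [0][0] indexing.
votes = [0, 1, 1, 2, 1]
assert Counter(votes).most_common(1)[0][0] == 1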
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( __lowercase : Dict , __lowercase : int , __lowercase : Optional[Any]=None ) -> Any:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
_snake_case = nn.Parameter(__lowercase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
_snake_case = nn.Parameter(__lowercase )
def a_ ( __lowercase : Any , __lowercase : Dict , __lowercase : Union[str, Any] ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : Any ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
_snake_case = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : Dict , __lowercase : List[str] , __lowercase : Union[str, Any] ) -> Optional[Any]:
# layernorm 1
_snake_case = weights[0][0][0]
_snake_case = np.asarray(layer_norm_a[0] )
_snake_case = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# lsh weights + output
_snake_case = weights[0][1]
if len(__lowercase ) < 4:
set_layer_weights_in_torch_lsh(__lowercase , torch_block.attention , __lowercase )
else:
set_layer_weights_in_torch_local(__lowercase , torch_block.attention , __lowercase )
# intermediate weighs
_snake_case = weights[2][0][1][2]
# Chunked Feed Forward
if len(__lowercase ) == 4:
_snake_case = intermediate_weights[2]
# layernorm 2
_snake_case = np.asarray(intermediate_weights[0][0] )
_snake_case = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# intermediate dense
_snake_case = np.asarray(intermediate_weights[1][0] )
_snake_case = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
# intermediate out
_snake_case = np.asarray(intermediate_weights[4][0] )
_snake_case = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Tuple , __lowercase : Tuple , __lowercase : Dict ) -> Optional[int]:
# reformer model
_snake_case = torch_model.reformer
# word embeds
_snake_case = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowercase ) , )
if isinstance(weights[3] , __lowercase ):
_snake_case = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_snake_case = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
_snake_case = nn.Parameter(torch.tensor(__lowercase ) )
_snake_case = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__lowercase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_snake_case = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__lowercase , __lowercase , __lowercase )
# output layer norm
_snake_case = np.asarray(weights[7][0] )
_snake_case = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# output embeddings
_snake_case = np.asarray(weights[9][0] )
_snake_case = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[Any] ) -> Optional[int]:
# Initialise PyTorch model
_snake_case = ReformerConfig.from_json_file(__lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
_snake_case = ReformerModelWithLMHead(__lowercase )
with open(__lowercase , 'rb' ) as f:
_snake_case = pickle.load(__lowercase )['weights']
set_model_weights_in_torch(__lowercase , __lowercase , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 282 | 1 |
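# Example invocation; the script filename and all paths below are placeholders,
# not values taken from this file:
# python convert_reformer_trax_checkpoint_to_pytorch.py \
#     --trax_model_pkl_path /path/to/model.pkl \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin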
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = DiTPipeline
_UpperCAmelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase : Optional[int] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase : Tuple = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase : Union[str, Any] = False
def A ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowercase , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_000 , norm_type='ada_norm_zero' , norm_elementwise_affine=lowercase , )
_snake_case = AutoencoderKL()
_snake_case = DDIMScheduler()
_snake_case = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def A ( self : Optional[Any] , lowercase : Dict , lowercase : int=0 ):
'''simple docstring'''
if str(lowercase ).startswith('mps' ):
_snake_case = torch.manual_seed(lowercase )
else:
_snake_case = torch.Generator(device=lowercase ).manual_seed(lowercase )
_snake_case = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = 'cpu'
_snake_case = self.get_dummy_components()
_snake_case = self.pipeline_class(**lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_dummy_inputs(lowercase )
_snake_case = pipe(**lowercase ).images
_snake_case = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_snake_case = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_snake_case = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase , 1E-3 )
def A ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=lowercase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def A ( self : str ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = torch.manual_seed(0 )
_snake_case = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_snake_case = ['vase', 'umbrella', 'white shark', 'white wolf']
_snake_case = pipe.get_label_ids(lowercase )
_snake_case = pipe(lowercase , generator=lowercase , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(lowercase , lowercase ):
_snake_case = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def A ( self : str ):
'''simple docstring'''
_snake_case = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_snake_case = ['vase', 'umbrella']
_snake_case = pipe.get_label_ids(lowercase )
_snake_case = torch.manual_seed(0 )
_snake_case = pipe(lowercase , generator=lowercase , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(lowercase , lowercase ):
_snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1 | 282 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a_ ( __lowercase : Dict ) -> List[Any]:
_snake_case = args.pruning_method
_snake_case = args.threshold
_snake_case = args.model_name_or_path.rstrip('/' )
_snake_case = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_snake_case = torch.load(os.path.join(__lowercase , 'pytorch_model.bin' ) )
_snake_case = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_snake_case = MagnitudeBinarizer.apply(inputs=__lowercase , threshold=__lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
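                # tensor names here end in "weight"; stripping that 6-char suffix
                # yields the prefix of the matching "...mask_scores" entry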
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = TopKBinarizer.apply(__lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = ThresholdBinarizer.apply(__lowercase , __lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case , _snake_case = -0.1, 1.1
_snake_case = torch.sigmoid(__lowercase )
_snake_case = s * (r - l) + l
_snake_case = s_bar.clamp(min=0.0 , max=1.0 )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_snake_case = os.path.join(
os.path.dirname(__lowercase ) , f'''bertarized_{os.path.basename(__lowercase )}''' )
if not os.path.isdir(__lowercase ):
shutil.copytree(__lowercase , __lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(__lowercase , os.path.join(__lowercase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
        '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '''
        '''For `sigmoied_threshold`, it is the threshold \\tau against which the (sigmoied) scores are compared. '''
        '''Not needed for `l0`.'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
    help='''Folder to save the pruned model to (defaults to a `bertarized_*` copy next to the original model folder)''',
)
_lowerCamelCase : int = parser.parse_args()
main(args) | 282 | 1 |
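# A minimal sketch of the idea behind the "topK" branch above; this is an
# illustration of top-fraction masking, not the emmental TopKBinarizer
# implementation: keep the fraction `threshold` of weights whose importance
# scores are largest.
def _topk_mask_sketch(scores, threshold):
    k = max(int(threshold * scores.numel()), 1)
    mask = torch.zeros_like(scores)
    _, idx = scores.flatten().topk(k)
    mask.view(-1)[idx] = 1.0
    return mask
print(_topk_mask_sketch(torch.tensor([[0.9, 0.1], [0.5, 0.3]]), 0.5))  # keeps the two largest scores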
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCamelCase : List[Any] = False
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = '''ybelkada/fonts'''
def a_ ( ) -> List[str]:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'Pix2StructImageProcessor. Please upgrade torch.' )
def a_ ( __lowercase : Any , __lowercase : Optional[Any] , __lowercase : Optional[Any] ) -> List[str]:
requires_backends(__lowercase , ['torch'] )
_check_torch_version()
_snake_case = image_tensor.unsqueeze(0 )
_snake_case = torch.nn.functional.unfold(__lowercase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
_snake_case = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , __lowercase , __lowercase , -1 )
_snake_case = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def a_ ( __lowercase : str , __lowercase : int = 36 , __lowercase : str = "black" , __lowercase : str = "white" , __lowercase : int = 5 , __lowercase : int = 5 , __lowercase : int = 5 , __lowercase : int = 5 , __lowercase : Optional[bytes] = None , __lowercase : Optional[str] = None , ) -> Image.Image:
requires_backends(__lowercase , 'vision' )
# Add new lines so that each line is no more than 80 characters.
_snake_case = textwrap.TextWrapper(width=80 )
_snake_case = wrapper.wrap(text=__lowercase )
_snake_case = '\n'.join(__lowercase )
if font_bytes is not None and font_path is None:
_snake_case = io.BytesIO(__lowercase )
elif font_path is not None:
_snake_case = font_path
else:
_snake_case = hf_hub_download(__lowercase , 'Arial.TTF' )
_snake_case = ImageFont.truetype(__lowercase , encoding='UTF-8' , size=__lowercase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
_snake_case = ImageDraw.Draw(Image.new('RGB' , (1, 1) , __lowercase ) )
_snake_case , _snake_case , _snake_case , _snake_case = temp_draw.textbbox((0, 0) , __lowercase , __lowercase )
# Create the actual image with a bit of padding around the text.
_snake_case = text_width + left_padding + right_padding
_snake_case = text_height + top_padding + bottom_padding
_snake_case = Image.new('RGB' , (image_width, image_height) , __lowercase )
_snake_case = ImageDraw.Draw(__lowercase )
draw.text(xy=(left_padding, top_padding) , text=__lowercase , fill=__lowercase , font=__lowercase )
return image
def a_ ( __lowercase : np.ndarray , __lowercase : str , **__lowercase : Tuple ) -> Tuple:
requires_backends(__lowercase , 'vision' )
# Convert to PIL image if necessary
_snake_case = to_pil_image(__lowercase )
_snake_case = render_text(__lowercase , **__lowercase )
_snake_case = max(header_image.width , image.width )
_snake_case = int(image.height * (new_width / image.width) )
_snake_case = int(header_image.height * (new_width / header_image.width) )
_snake_case = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
_snake_case = to_numpy_array(__lowercase )
if infer_channel_dimension_format(__lowercase ) == ChannelDimension.LAST:
_snake_case = to_channel_dimension_format(__lowercase , ChannelDimension.LAST )
return new_image
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : str = ["flattened_patches"]
def __init__( self : int , lowercase : bool = True , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : int = 2_048 , lowercase : bool = False , **lowercase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = patch_size if patch_size is not None else {'height': 16, 'width': 16}
_snake_case = do_normalize
_snake_case = do_convert_rgb
_snake_case = max_patches
_snake_case = is_vqa
def A ( self : List[Any] , lowercase : np.ndarray , lowercase : int , lowercase : dict , **lowercase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
_snake_case = to_channel_dimension_format(lowercase , ChannelDimension.FIRST )
_snake_case = torch.from_numpy(lowercase )
_snake_case , _snake_case = patch_size['height'], patch_size['width']
_snake_case , _snake_case = get_image_size(lowercase )
# maximize scale s.t.
_snake_case = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
_snake_case = max(min(math.floor(scale * image_height / patch_height ) , lowercase ) , 1 )
_snake_case = max(min(math.floor(scale * image_width / patch_width ) , lowercase ) , 1 )
_snake_case = max(num_feasible_rows * patch_height , 1 )
_snake_case = max(num_feasible_cols * patch_width , 1 )
_snake_case = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=lowercase , antialias=lowercase , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
_snake_case = torch_extract_patches(lowercase , lowercase , lowercase )
_snake_case = patches.shape
_snake_case = patches_shape[1]
_snake_case = patches_shape[2]
_snake_case = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
_snake_case = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
_snake_case = torch.arange(lowercase ).reshape([rows, 1] ).repeat(1 , lowercase ).reshape([rows * columns, 1] )
_snake_case = torch.arange(lowercase ).reshape([1, columns] ).repeat(lowercase , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
_snake_case = row_ids.to(torch.floataa )
_snake_case = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
_snake_case = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
_snake_case = torch.nn.functional.pad(lowercase , [0, 0, 0, max_patches - (rows * columns)] ).float()
_snake_case = to_numpy_array(lowercase )
return result
def A ( self : Dict , lowercase : np.ndarray , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Tuple ):
'''simple docstring'''
if image.dtype == np.uinta:
_snake_case = image.astype(np.floataa )
# take mean across the whole `image`
_snake_case = np.mean(lowercase )
_snake_case = np.std(lowercase )
_snake_case = max(lowercase , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase , mean=lowercase , std=lowercase , **lowercase )
def A ( self : Optional[int] , lowercase : ImageInput , lowercase : Optional[str] = None , lowercase : bool = None , lowercase : Optional[bool] = None , lowercase : Optional[int] = None , lowercase : Optional[Dict[str, int]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : ChannelDimension = ChannelDimension.FIRST , **lowercase : int , ):
'''simple docstring'''
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_snake_case = patch_size if patch_size is not None else self.patch_size
_snake_case = max_patches if max_patches is not None else self.max_patches
_snake_case = self.is_vqa
if kwargs.get('data_format' , lowercase ) is not None:
            raise ValueError('data_format is not an accepted input, as the outputs are always flattened patches rather than images.' )
_snake_case = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_snake_case = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(lowercase ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
_snake_case = kwargs.pop('font_bytes' , lowercase )
_snake_case = kwargs.pop('font_path' , lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = [header_text] * len(lowercase )
_snake_case = [
render_header(lowercase , header_text[i] , font_bytes=lowercase , font_path=lowercase )
for i, image in enumerate(lowercase )
]
if do_normalize:
_snake_case = [self.normalize(image=lowercase ) for image in images]
# convert to torch tensor and permute
_snake_case = [
self.extract_flattened_patches(image=lowercase , max_patches=lowercase , patch_size=lowercase )
for image in images
]
# create attention mask in numpy
_snake_case = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
_snake_case = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=lowercase )
return encoded_outputs | 282 |
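# A standalone sketch of the resize arithmetic used by extract_flattened_patches
# above: the scale is chosen so the grid of patch_height x patch_width patches
# never exceeds max_patches (the 480x640 image size below is an arbitrary example).
_mp, _ph, _pw = 2_048, 16, 16
_ih, _iw = 480, 640
_scale = math.sqrt(_mp * (_ph / _ih) * (_pw / _iw))
_rows = max(min(math.floor(_scale * _ih / _ph), _mp), 1)
_cols = max(min(math.floor(_scale * _iw / _pw), _mp), 1)
assert _rows * _cols <= _mp  # 39 * 52 = 2028 <= 2048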
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@property
def A ( self : List[str] ):
'''simple docstring'''
return self.get_dummy_input()
@property
def A ( self : Any ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def A ( self : Union[str, Any] , lowercase : Any=True , lowercase : List[Any]=False , lowercase : List[str]=False , lowercase : Dict=False , ):
'''simple docstring'''
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowercase )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowercase , generator=lowercase , device=lowercase )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowercase , device=lowercase )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowercase , generator=lowercase , device=lowercase ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowercase )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowercase , device=lowercase )
return dummy_input
def A ( self : Any ):
'''simple docstring'''
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Dict , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
unet_block.to(lowercase )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowercase ).to(lowercase )
assert torch_all_close(output_slice.flatten() , lowercase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def A ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
model.to(lowercase )
model.train()
_snake_case = model(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
_snake_case = torch.device(lowercase )
_snake_case = randn_tensor(output.shape , device=lowercase )
_snake_case = torch.nn.functional.mse_loss(lowercase , lowercase )
loss.backward() | 282 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCamelCase : Any = logging.getLogger(__name__)
_lowerCamelCase : int = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_lowerCamelCase : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} ,)
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCAmelCase )} ,)
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "The input training data file (a text file)."} )
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} ,)
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} ,)
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} ,)
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} ,)
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} ,)
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
_UpperCAmelCase : bool = field(default=UpperCAmelCase ,metadata={"help": "Whether ot not to use whole word mask."} )
_UpperCAmelCase : float = field(
default=0.15 ,metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
_UpperCAmelCase : float = field(
default=1 / 6 ,metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} ,)
_UpperCAmelCase : int = field(
default=5 ,metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
_UpperCAmelCase : int = field(
default=-1 ,metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} ,)
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
def a_ ( __lowercase : DataTrainingArguments , __lowercase : PreTrainedTokenizer , __lowercase : bool = False , __lowercase : Optional[str] = None , ) -> str:
def _dataset(__lowercase : str , __lowercase : Dict=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size , ref_path=__lowercase , )
return LineByLineTextDataset(tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=__lowercase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(__lowercase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def a_ ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_snake_case , _snake_case , _snake_case = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __lowercase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_snake_case = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_snake_case = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
_snake_case = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
_snake_case = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_snake_case = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it, and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
_snake_case = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
_snake_case = AutoModelWithLMHead.from_config(__lowercase )
model.resize_token_embeddings(len(__lowercase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
_snake_case = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_snake_case = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_snake_case = (
get_dataset(__lowercase , tokenizer=__lowercase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_snake_case = (
get_dataset(__lowercase , tokenizer=__lowercase , evaluate=__lowercase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_snake_case = DataCollatorForPermutationLanguageModeling(
tokenizer=__lowercase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_snake_case = DataCollatorForWholeWordMask(
tokenizer=__lowercase , mlm_probability=data_args.mlm_probability )
else:
_snake_case = DataCollatorForLanguageModeling(
tokenizer=__lowercase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_snake_case = Trainer(
model=__lowercase , args=__lowercase , data_collator=__lowercase , train_dataset=__lowercase , eval_dataset=__lowercase , prediction_loss_only=__lowercase , )
# Training
if training_args.do_train:
_snake_case = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=__lowercase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_snake_case = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_snake_case = trainer.evaluate()
_snake_case = math.exp(eval_output['eval_loss'] )
_snake_case = {'perplexity': perplexity}
_snake_case = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(__lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , __lowercase , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(__lowercase )
return results
def a_ ( __lowercase : List[Any] ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 282 |
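# Example invocation; the script name, model id, and paths are hypothetical,
# while the flags correspond to the dataclass fields defined above:
# python run_language_modeling.py \
#     --model_name_or_path bert-base-uncased \
#     --mlm \
#     --train_data_file /path/to/train.txt \
#     --do_train \
#     --output_dir /tmp/lm_output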
_lowerCamelCase : int = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : List[str] = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def a_ ( __lowercase : int , __lowercase : int , __lowercase : int ) -> str:
    assert len(str(year ) ) >= 4, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
_snake_case = year // 100
_snake_case = (5 * (century % 4) + 2) % 7
_snake_case = year % 100
_snake_case = centurian % 12
_snake_case = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_snake_case = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_snake_case = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
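# A self-contained worked example of the anchor arithmetic above, cross-checked
# against the standard library: the 2023 doomsday is a Tuesday (index 2 in the
# weekday table), and 2023-04-04 ("4/4", a doomsday date) is indeed a Tuesday.
import datetime
_year = 2023
_century_anchor = (5 * ((_year // 100) % 4) + 2) % 7
_centurian = _year % 100
_doomsday = ((_centurian // 12) + (_centurian % 12) + ((_centurian % 12) // 4) + _century_anchor) % 7
assert _doomsday == 2
assert datetime.date(2023, 4, 4).strftime('%A') == 'Tuesday'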
_lowerCamelCase : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowerCamelCase : Optional[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowerCamelCase : Tuple = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
} | 282 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Union[str, Any] , lowercase : Optional[int]=32 ):
'''simple docstring'''
set_seed(0 )
_snake_case = UNetaDModel(sample_size=lowercase , in_channels=3 , out_channels=3 )
_snake_case = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_snake_case = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
_snake_case = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
_snake_case = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randn((4, 3, 32, 32) ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randint(0 , 1_000 , (4,) ).long().to(lowercase ) for _ in range(4 )]
# train with a DDPM scheduler
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) ) | 282 | 1 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoTokenizer.from_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = tokenizer('This is me' , return_tensors='pt' )
_snake_case = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_snake_case = model.generate(**lowercase )
_snake_case = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_snake_case = model_reloaded.generate(**lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase ) )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowercase ):
model.save_pretrained(lowercase )
_snake_case = model.reverse_bettertransformer()
model.save_pretrained(lowercase ) | 282 |
import numpy as np
def a_ ( __lowercase : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
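A quick vectorised check of the sigmoid above; NumPy broadcasts the exponential over the whole array:

import numpy as np
# sigmoid(-1) ~= 0.2689, sigmoid(0) = 0.5, sigmoid(1) ~= 0.7311
print(1 / (1 + np.exp(-np.array([-1.0, 0.0, 1.0]))))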
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a_ ( __lowercase : Dict ) -> List[Any]:
_snake_case = args.pruning_method
_snake_case = args.threshold
_snake_case = args.model_name_or_path.rstrip('/' )
_snake_case = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_snake_case = torch.load(os.path.join(__lowercase , 'pytorch_model.bin' ) )
_snake_case = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_snake_case = MagnitudeBinarizer.apply(inputs=__lowercase , threshold=__lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = TopKBinarizer.apply(__lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = ThresholdBinarizer.apply(__lowercase , __lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case , _snake_case = -0.1, 1.1
_snake_case = torch.sigmoid(__lowercase )
_snake_case = s * (r - l) + l
_snake_case = s_bar.clamp(min=0.0 , max=1.0 )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_snake_case = os.path.join(
os.path.dirname(__lowercase ) , f'''bertarized_{os.path.basename(__lowercase )}''' )
if not os.path.isdir(__lowercase ):
shutil.copytree(__lowercase , __lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(__lowercase , os.path.join(__lowercase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_lowerCamelCase : int = parser.parse_args()
main(args) | 282 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : int ):
'''simple docstring'''
_snake_case = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_snake_case = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_snake_case = 'The dog is cute and lives in the garden house'
_snake_case = jnp.array([tokenizer.encode(lowercase )] )
_snake_case = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_snake_case = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
_snake_case = model(lowercase )['last_hidden_state']
self.assertEqual(output.shape , lowercase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , lowercase , atol=1E-3 ) ) | 282 | 1 |
from collections.abc import Sequence
def a_ ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(__lowercase ) )
def a_ ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
_snake_case = 0.0
for coeff in reversed(__lowercase ):
_snake_case = result * x + coeff
return result
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowerCamelCase : Optional[int] = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x)) | 282 |
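The Horner loop above replaces per-term exponentiation with one multiply-add per coefficient; a one-line fold reproducing its result for the sample inputs:

from functools import reduce
poly, x = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
# ((((0*x + 7.0)*x + 9.3)*x + 5.0)*x + 0.0)*x + 0.0 = 79800.0, matching evaluate_poly
print(reduce(lambda acc, coeff: acc * x + coeff, reversed(poly), 0.0))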
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = '''▁'''
_lowerCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Any = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
_lowerCamelCase : Optional[int] = {
'''google/pegasus-xsum''': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Any = PegasusTokenizer
_UpperCAmelCase : Dict = ["input_ids", "attention_mask"]
def __init__( self : Tuple , lowercase : str=None , lowercase : Any=None , lowercase : List[Any]="<pad>" , lowercase : List[Any]="</s>" , lowercase : Tuple="<unk>" , lowercase : Any="<mask_2>" , lowercase : List[str]="<mask_1>" , lowercase : List[Any]=None , lowercase : Dict=103 , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowercase , lowercase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowercase )}, but is'''
f''' {type(lowercase )}''' )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowercase ) , self.offset - 1 )
]
if len(set(lowercase ) ) != len(lowercase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowercase , tokenizer_file=lowercase , pad_token=lowercase , eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , **lowercase , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
def A ( self : List[str] , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def A ( self : List[Any] , lowercase : List , lowercase : Optional[List] = None , lowercase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowercase )
elif token_ids_a is None:
return self._special_token_mask(lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def A ( self : Any , lowercase : Tuple , lowercase : Any=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A ( self : int , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,) | 282 | 1 |
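A minimal use of the fast tokenizer defined above, loading the checkpoint from its pretrained-vocab map (requires network access to the Hub):

from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
enc = tok("PEGASUS uses gap-sentence pre-training.")
# the EOS id (1 for this checkpoint) is appended, per the special-token logic above
print(enc["input_ids"][-1])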
from collections.abc import Sequence
def a_ ( __lowercase : Sequence[int] | None = None ) -> int:
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
_snake_case = nums[0]
for i in range(1 , len(__lowercase ) ):
_snake_case = nums[i]
_snake_case = max(__lowercase , ans + num , __lowercase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_lowerCamelCase : List[Any] = int(input('''Enter number of elements : ''').strip())
_lowerCamelCase : List[Any] = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array)) | 282 |
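Note the loop above computes the best non-contiguous subsequence sum: each step keeps, extends, or restarts the running answer, so negative elements are simply skipped. A de-obfuscated reading (the argument order inside max is an assumption for the collapsed names):

nums = [2, -100, 3]
ans = nums[0]
for num in nums[1:]:
    ans = max(ans, ans + num, num)
print(ans)  # 5: the subsequence picks 2 and 3 and skips -100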
from collections.abc import Sequence
def a_ ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(__lowercase ) )
def a_ ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
_snake_case = 0.0
for coeff in reversed(__lowercase ):
_snake_case = result * x + coeff
return result
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowerCamelCase : Optional[int] = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x)) | 282 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'''nvidia/segformer-b0-finetuned-ade-512-512''': (
'''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'''
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = "segformer"
def __init__( self : List[str] , lowercase : Tuple=3 , lowercase : Optional[int]=4 , lowercase : Tuple=[2, 2, 2, 2] , lowercase : List[Any]=[8, 4, 2, 1] , lowercase : List[Any]=[32, 64, 160, 256] , lowercase : List[str]=[7, 3, 3, 3] , lowercase : List[str]=[4, 2, 2, 2] , lowercase : Tuple=[1, 2, 5, 8] , lowercase : str=[4, 4, 4, 4] , lowercase : Union[str, Any]="gelu" , lowercase : Union[str, Any]=0.0 , lowercase : Union[str, Any]=0.0 , lowercase : Optional[int]=0.1 , lowercase : Dict=0.02 , lowercase : Dict=0.1 , lowercase : Dict=1E-6 , lowercase : List[Any]=256 , lowercase : Optional[Any]=255 , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , lowercase , )
_snake_case = num_channels
_snake_case = num_encoder_blocks
_snake_case = depths
_snake_case = sr_ratios
_snake_case = hidden_sizes
_snake_case = patch_sizes
_snake_case = strides
_snake_case = mlp_ratios
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = classifier_dropout_prob
_snake_case = initializer_range
_snake_case = drop_path_rate
_snake_case = layer_norm_eps
_snake_case = decoder_hidden_size
_snake_case = kwargs.get('reshape_last_stage' , lowercase )
_snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = version.parse("1.11" )
@property
def A ( self : List[Any] ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : List[str] ):
'''simple docstring'''
return 1E-4
@property
def A ( self : int ):
'''simple docstring'''
return 12 | 282 |
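A minimal instantiation of the config class above using the defaults from its __init__; nothing is downloaded:

from transformers import SegformerConfig

config = SegformerConfig()
print(config.num_encoder_blocks, config.hidden_sizes)  # 4 [32, 64, 160, 256]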
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : str , lowercase : List[str]=13 , lowercase : Any=7 , lowercase : Dict=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : Any=True , lowercase : Tuple=99 , lowercase : str=24 , lowercase : str=2 , lowercase : Any=6 , lowercase : Dict=37 , lowercase : List[str]="gelu" , lowercase : Dict=0.1 , lowercase : Tuple=0.1 , lowercase : Optional[Any]=512 , lowercase : List[Any]=16 , lowercase : str=2 , lowercase : int=0.02 , lowercase : List[Any]=3 , lowercase : List[Any]=None , lowercase : int=1_000 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = scope
_snake_case = range_bbox
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_snake_case = bbox[i, j, 3]
_snake_case = bbox[i, j, 1]
_snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_snake_case = bbox[i, j, 2]
_snake_case = bbox[i, j, 0]
_snake_case = t
_snake_case = None
if self.use_input_mask:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A ( self : List[str] ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A ( self : str , lowercase : Tuple , lowercase : Tuple , lowercase : str , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : str , ):
'''simple docstring'''
_snake_case = LiltModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : List[Any] , lowercase : int , lowercase : int , lowercase : Any , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Optional[int] , ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = LiltForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] , lowercase : int , lowercase : int , ):
'''simple docstring'''
_snake_case = LiltForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : List[str] = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Union[str, Any] = False
def A ( self : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple ):
'''simple docstring'''
return True
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = LiltModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case = type
self.model_tester.create_and_check_model(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = LiltModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase )
_snake_case = torch.tensor([[1, 2]] , device=lowercase )
_snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(input_ids=lowercase , bbox=lowercase )
_snake_case = torch.Size([1, 2, 768] )
_snake_case = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowercase , )
        self.assertEqual(outputs.last_hidden_state.shape , lowercase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase , atol=1E-3 ) ) | 282 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
_lowerCamelCase : Tuple = logging.getLogger()
def a_ ( __lowercase : Path , __lowercase : list ) -> List[str]:
_snake_case = '\n'.join(__lowercase )
Path(__lowercase ).open('w' ).writelines(__lowercase )
_lowerCamelCase : Dict = '''patrickvonplaten/t5-tiny-random'''
_lowerCamelCase : Tuple = '''sshleifer/bart-tiny-random'''
_lowerCamelCase : Union[str, Any] = '''sshleifer/tiny-mbart'''
_lowerCamelCase : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : str , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_snake_case = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_snake_case = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(lowercase , lowercase )
_snake_case = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_snake_case = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_snake_case = f'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(lowercase , 'argv' , lowercase ):
run_generate()
assert Path(lowercase ).exists()
# os.remove(Path(output_file_name))
def A ( self : Tuple ):
'''simple docstring'''
self.run_eval_tester(lowercase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def A ( self : Optional[Any] , lowercase : Optional[Any] ):
'''simple docstring'''
self.run_eval_tester(lowercase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def A ( self : Optional[int] , lowercase : Any ):
'''simple docstring'''
_snake_case = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_snake_case = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_snake_case = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_snake_case = Path(self.get_auto_remove_tmp_dir() )
_snake_case = str(tmp_dir / 'scores.json' )
_snake_case = str(tmp_dir / 'val.target' )
_dump_articles(lowercase , text['en'] )
_dump_articles(lowercase , text['de'] )
_snake_case = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_snake_case = f'''
run_eval_search.py
{model}
{str(lowercase )}
{str(lowercase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(lowercase , 'argv' , lowercase ):
with CaptureStdout() as cs:
run_search()
_snake_case = [' num_beams | length_penalty', model, 'Best score args']
_snake_case = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(lowercase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowercase ).exists()
os.remove(Path(lowercase ) ) | 282 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def a_ ( __lowercase : Sequence[float] , __lowercase : int , __lowercase : int ) -> tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
_snake_case = (low + high) // 2
_snake_case , _snake_case , _snake_case = max_subarray(__lowercase , __lowercase , __lowercase )
_snake_case , _snake_case , _snake_case = max_subarray(__lowercase , mid + 1 , __lowercase )
_snake_case , _snake_case , _snake_case = max_cross_sum(__lowercase , __lowercase , __lowercase , __lowercase )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def a_ ( __lowercase : Sequence[float] , __lowercase : int , __lowercase : int , __lowercase : int ) -> tuple[int, int, float]:
_snake_case , _snake_case = float('-inf' ), -1
_snake_case , _snake_case = float('-inf' ), -1
_snake_case = 0
for i in range(__lowercase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_snake_case = summ
_snake_case = i
_snake_case = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_snake_case = summ
_snake_case = i
return max_left, max_right, (left_sum + right_sum)
def a_ ( __lowercase : int ) -> float:
_snake_case = [randint(1 , __lowercase ) for _ in range(__lowercase )]
_snake_case = time.time()
max_subarray(__lowercase , 0 , input_size - 1 )
_snake_case = time.time()
return end - start
def a_ ( ) -> None:
_snake_case = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
_snake_case = [time_max_subarray(__lowercase ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(__lowercase , __lowercase ):
print(__lowercase , '\t\t' , __lowercase )
plt.plot(__lowercase , __lowercase )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod() | 282 | 1 |
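The recursion above splits at the midpoint and stitches the halves with max_cross_sum, giving the Theta(n log n) growth the plotting helper measures; a classic hand check, with names as defined above once the _snake_case placeholders are restored:

# max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) -> (3, 6, 6)
# the best contiguous slice is arr[3:7] = [4, -1, 2, 1] with sum 6;
# the recurrence T(n) = 2*T(n/2) + Theta(n) yields Theta(n log n) time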
def a_ ( __lowercase : int = 1_000_000 ) -> int:
_snake_case = set(range(3 , __lowercase , 2 ) )
primes.add(2 )
for p in range(3 , __lowercase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , __lowercase , __lowercase ) ) )
    _snake_case = [float(n ) for n in range(limit + 1 )]
for p in primes:
for n in range(__lowercase , limit + 1 , __lowercase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'{solution() = }') | 282 |
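A small hand check of the Euler-product sieve above, assuming the comprehension builds float(n) as fixed:

# limit = 8: surviving primes are {2, 3, 5, 7}
# phi(n) = n * prod(1 - 1/p) for n = 2..8 -> 1, 2, 2, 4, 2, 6, 4
# sum = 21, so solution(8) == 21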
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] , lowercase : Dict ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
_snake_case = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(lowercase )
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Any ):
'''simple docstring'''
_snake_case = 'sgugger/tiny-distilbert-classification'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , only_pretrain_model=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , torchscript=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , fpaa=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
# set architectures equal to `None`
_snake_case = None
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tinier_bart'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = 'sshleifer/tinier_bart'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , save_to_csv=lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowercase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowercase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowercase , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowercase , 'env.csv' ) , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'env.csv' ) ).exists() )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowercase : Optional[Any] ):
self.assertTrue(hasattr(lowercase , 'sequential' ) )
self.assertTrue(hasattr(lowercase , 'cumulative' ) )
self.assertTrue(hasattr(lowercase , 'current' ) )
self.assertTrue(hasattr(lowercase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase , 'log.txt' ) , log_print=lowercase , trace_memory_line_by_line=lowercase , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase , 'log.txt' ) ).exists() ) | 282 | 1 |
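A standalone run mirroring the tests above (transformers' PyTorch benchmark utilities, deprecated in recent releases but invoked exactly as in these tests):

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)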
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Tuple = logging.get_logger(__name__)
def a_ ( __lowercase : Dict ) -> Dict:
_snake_case = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
_snake_case = DetaConfig(
backbone_config=__lowercase , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=__lowercase , with_box_refine=__lowercase , two_stage=__lowercase , )
# set labels
_snake_case = 'huggingface/label-files'
if "o365" in model_name:
_snake_case = 366
_snake_case = 'object365-id2label.json'
else:
_snake_case = 91
_snake_case = 'coco-detection-id2label.json'
_snake_case = num_labels
_snake_case = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type='dataset' ) ) , 'r' ) )
_snake_case = {int(__lowercase ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def a_ ( __lowercase : List[Any] ) -> List[Any]:
_snake_case = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def a_ ( __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Optional[Any]:
_snake_case = dct.pop(__lowercase )
_snake_case = val
def a_ ( __lowercase : int , __lowercase : Optional[Any] ) -> Optional[int]:
_snake_case = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_snake_case = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_snake_case = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:dim, :]
_snake_case = in_proj_bias[: dim]
_snake_case = in_proj_weight[
dim : dim * 2, :
]
_snake_case = in_proj_bias[
dim : dim * 2
]
_snake_case = in_proj_weight[
-dim :, :
]
_snake_case = in_proj_bias[-dim :]
# fmt: on
def a_ ( __lowercase : Optional[int] , __lowercase : Optional[int] ) -> Union[str, Any]:
# transformer decoder self-attention layers
_snake_case = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_snake_case = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[:hidden_size, :]
_snake_case = in_proj_bias[:hidden_size]
_snake_case = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_snake_case = in_proj_bias[hidden_size : hidden_size * 2]
_snake_case = in_proj_weight[-hidden_size:, :]
_snake_case = in_proj_bias[-hidden_size:]
def a_ ( ) -> Tuple:
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a_ ( __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : Dict ) -> List[str]:
_snake_case = get_deta_config(__lowercase )
# load original state dict
if model_name == "deta-swin-large":
_snake_case = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_snake_case = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
_snake_case = torch.load(__lowercase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(__lowercase , param.shape )
# rename keys
_snake_case = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_swin_q_k_v(__lowercase , config.backbone_config )
read_in_decoder_q_k_v(__lowercase , __lowercase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "input_proj" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_snake_case = state_dict.pop(__lowercase )
_snake_case = val
# finally, create HuggingFace model and load state dict
_snake_case = DetaForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
_snake_case = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(__lowercase )
# load image processor
_snake_case = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
_snake_case = prepare_img()
_snake_case = processor(images=__lowercase , return_tensors='pt' )
_snake_case = encoding['pixel_values']
_snake_case = model(pixel_values.to(__lowercase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_snake_case = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
_snake_case = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
_snake_case = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
_snake_case = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowercase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowercase ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'''jozhang97/{model_name}''' )
processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 282 |
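A typical invocation of the conversion entry point above; the script filename is an assumption, while the flags match the argparse definitions:

# python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#     --pytorch_dump_folder_path ./deta-swin-large --push_to_hub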
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase : int , lowercase : int , lowercase : float = 0 ):
'''simple docstring'''
_snake_case , _snake_case = row, column
_snake_case = [[default_value for c in range(lowercase )] for r in range(lowercase )]
def __str__( self : int ):
'''simple docstring'''
_snake_case = f'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
_snake_case = 0
for row_vector in self.array:
for obj in row_vector:
_snake_case = max(lowercase , len(str(lowercase ) ) )
_snake_case = f'''%{max_element_length}s'''
# Make string and return
def single_line(lowercase : list[float] ) -> str:
nonlocal string_format_identifier
_snake_case = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowercase ) for row_vector in self.array )
return s
def __repr__( self : Dict ):
'''simple docstring'''
return str(self )
def A ( self : str , lowercase : tuple[int, int] ):
'''simple docstring'''
if not (isinstance(lowercase , (list, tuple) ) and len(lowercase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Dict , lowercase : tuple[int, int] ):
'''simple docstring'''
assert self.validate_indicies(lowercase )
return self.array[loc[0]][loc[1]]
def __setitem__( self : str , lowercase : tuple[int, int] , lowercase : float ):
'''simple docstring'''
assert self.validate_indicies(lowercase )
_snake_case = value
def __add__( self : str , lowercase : Matrix ):
'''simple docstring'''
assert isinstance(lowercase , lowercase )
assert self.row == another.row and self.column == another.column
# Add
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ):
'''simple docstring'''
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = -self[r, c]
return result
def __sub__( self : List[str] , lowercase : Matrix ):
'''simple docstring'''
return self + (-another)
def __mul__( self : Dict , lowercase : int | float | Matrix ):
'''simple docstring'''
if isinstance(lowercase , (int, float) ): # Scalar multiplication
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c] * another
return result
elif isinstance(lowercase , lowercase ): # Matrix multiplication
assert self.column == another.row
_snake_case = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_snake_case = f'''Unsupported type given for another ({type(lowercase )})'''
raise TypeError(lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c]
return result
def A ( self : List[Any] , lowercase : Matrix , lowercase : Matrix ):
'''simple docstring'''
assert isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
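# Sherman-Morrison: (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# where `self` is assumed to already hold A^(-1); the factor computed below is the
# denominator 1 + v^T A^(-1) u.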
_snake_case = v.transpose()
_snake_case = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def a_ ( ) -> None:
# a^(-1)
_snake_case = Matrix(3 , 3 , 0 )
for i in range(3 ):
_snake_case = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
_snake_case = Matrix(3 , 1 , 0 )
_snake_case , _snake_case , _snake_case = 1, 2, -3
_snake_case = Matrix(3 , 1 , 0 )
_snake_case , _snake_case , _snake_case = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(__lowercase , __lowercase )}''' )
def a_ ( ) -> None:
import doctest
doctest.testmod()
testa() | 282 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase : Any , lowercase : Dict=7 , lowercase : List[str]=3 , lowercase : List[str]=18 , lowercase : List[Any]=30 , lowercase : List[str]=400 , lowercase : Union[str, Any]=True , lowercase : Tuple=None , lowercase : Optional[int]=True , lowercase : Dict=None , lowercase : int=True , lowercase : Any=[0.48145466, 0.4578275, 0.40821073] , lowercase : List[str]=[0.26862954, 0.26130258, 0.27577711] , lowercase : List[Any]=True , ):
'''simple docstring'''
_snake_case = size if size is not None else {'height': 224, 'width': 224}
_snake_case = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = min_resolution
_snake_case = max_resolution
_snake_case = do_resize
_snake_case = size
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_normalize
_snake_case = image_mean
_snake_case = image_std
_snake_case = do_convert_rgb
def A ( self : List[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def A ( self : str , lowercase : Dict=False , lowercase : Dict=False , lowercase : List[str]=False ):
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_snake_case = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_snake_case = []
for i in range(self.batch_size ):
_snake_case , _snake_case = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_snake_case = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
if torchify:
_snake_case = [torch.from_numpy(lowercase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None
def A ( self : Dict ):
'''simple docstring'''
_snake_case = ChineseCLIPImageProcessingTester(self , do_center_crop=lowercase )
@property
def A ( self : Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : int ):
'''simple docstring'''
_snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , 'do_resize' ) )
self.assertTrue(hasattr(lowercase , 'size' ) )
self.assertTrue(hasattr(lowercase , 'do_center_crop' ) )
self.assertTrue(hasattr(lowercase , 'center_crop' ) )
self.assertTrue(hasattr(lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase , 'image_mean' ) )
self.assertTrue(hasattr(lowercase , 'image_std' ) )
self.assertTrue(hasattr(lowercase , 'do_convert_rgb' ) )
def A ( self : int ):
'''simple docstring'''
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 224, 'width': 224} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def A ( self : Optional[Any] ):
'''simple docstring'''
pass
def A ( self : int ):
'''simple docstring'''
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_snake_case = image_processing(lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_snake_case = image_processing(lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_snake_case = image_processing(lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None
def A ( self : str ):
'''simple docstring'''
_snake_case = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=lowercase )
_snake_case = 3
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : int ):
'''simple docstring'''
_snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , 'do_resize' ) )
self.assertTrue(hasattr(lowercase , 'size' ) )
self.assertTrue(hasattr(lowercase , 'do_center_crop' ) )
self.assertTrue(hasattr(lowercase , 'center_crop' ) )
self.assertTrue(hasattr(lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase , 'image_mean' ) )
self.assertTrue(hasattr(lowercase , 'image_std' ) )
self.assertTrue(hasattr(lowercase , 'do_convert_rgb' ) )
def A ( self : Optional[int] ):
'''simple docstring'''
pass
def A ( self : str ):
'''simple docstring'''
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_snake_case = image_processing(lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , ) | 282 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , *lowercase : Optional[int] , **lowercase : Any ):
'''simple docstring'''
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase ) | 282 | 1 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , *lowercase : Optional[int] , **lowercase : Any ):
'''simple docstring'''
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase ) | 282 |
def a_ ( __lowercase : str ) -> int:
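# Converts a hexadecimal string to its binary representation returned as an int,
# e.g. "AC" -> 10101100 and "-c" -> -1100 (illustrative values, standard hex parsing assumed).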
_snake_case = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
_snake_case = hex_num[0] == '-'
if is_negative:
_snake_case = hex_num[1:]
try:
_snake_case = int(__lowercase , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
_snake_case = ''
while int_num > 0:
_snake_case = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = tempfile.mkdtemp()
_snake_case = SamImageProcessor()
_snake_case = SamProcessor(lowercase )
processor.save_pretrained(self.tmpdirname )
def A ( self : int , **lowercase : str ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def A ( self : Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : Dict ):
'''simple docstring'''
_snake_case = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
_snake_case = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.get_image_processor()
_snake_case = SamProcessor(image_processor=lowercase )
_snake_case = self.prepare_image_inputs()
_snake_case = image_processor(lowercase , return_tensors='np' )
_snake_case = processor(images=lowercase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_image_processor()
_snake_case = SamProcessor(image_processor=lowercase )
_snake_case = [torch.ones((1, 3, 5, 5) )]
_snake_case = [[1_764, 2_646]]
_snake_case = [[683, 1_024]]
_snake_case = processor.post_process_masks(lowercase , lowercase , lowercase )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
_snake_case = processor.post_process_masks(
lowercase , torch.tensor(lowercase ) , torch.tensor(lowercase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
_snake_case = [np.ones((1, 3, 5, 5) )]
_snake_case = processor.post_process_masks(lowercase , np.array(lowercase ) , np.array(lowercase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
_snake_case = [[1, 0], [0, 1]]
with self.assertRaises(lowercase ):
_snake_case = processor.post_process_masks(lowercase , np.array(lowercase ) , np.array(lowercase ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = tempfile.mkdtemp()
_snake_case = SamImageProcessor()
_snake_case = SamProcessor(lowercase )
processor.save_pretrained(self.tmpdirname )
def A ( self : str , **lowercase : int ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def A ( self : List[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A ( self : Any ):
'''simple docstring'''
_snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
_snake_case = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def A ( self : str ):
'''simple docstring'''
_snake_case = self.get_image_processor()
_snake_case = SamProcessor(image_processor=lowercase )
_snake_case = self.prepare_image_inputs()
_snake_case = image_processor(lowercase , return_tensors='np' )
_snake_case = processor(images=lowercase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.get_image_processor()
_snake_case = SamProcessor(image_processor=lowercase )
_snake_case = [tf.ones((1, 3, 5, 5) )]
_snake_case = [[1_764, 2_646]]
_snake_case = [[683, 1_024]]
_snake_case = processor.post_process_masks(lowercase , lowercase , lowercase , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
_snake_case = processor.post_process_masks(
lowercase , tf.convert_to_tensor(lowercase ) , tf.convert_to_tensor(lowercase ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
_snake_case = [np.ones((1, 3, 5, 5) )]
_snake_case = processor.post_process_masks(
lowercase , np.array(lowercase ) , np.array(lowercase ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
_snake_case = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_snake_case = processor.post_process_masks(
lowercase , np.array(lowercase ) , np.array(lowercase ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = tempfile.mkdtemp()
_snake_case = SamImageProcessor()
_snake_case = SamProcessor(lowercase )
processor.save_pretrained(self.tmpdirname )
def A ( self : Any , **lowercase : List[str] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def A ( self : Dict ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.get_image_processor()
_snake_case = SamProcessor(image_processor=lowercase )
_snake_case = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
_snake_case = [tf.convert_to_tensor(lowercase )]
_snake_case = [torch.tensor(lowercase )]
_snake_case = [[1_764, 2_646]]
_snake_case = [[683, 1_024]]
_snake_case = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='tf' )
_snake_case = processor.post_process_masks(
lowercase , lowercase , lowercase , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.get_image_processor()
_snake_case = SamProcessor(image_processor=lowercase )
_snake_case = self.prepare_image_inputs()
_snake_case = image_processor(lowercase , return_tensors='pt' )['pixel_values'].numpy()
_snake_case = processor(images=lowercase , return_tensors='pt' )['pixel_values'].numpy()
_snake_case = image_processor(lowercase , return_tensors='tf' )['pixel_values'].numpy()
_snake_case = processor(images=lowercase , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(lowercase , lowercase ) )
self.assertTrue(np.allclose(lowercase , lowercase ) )
self.assertTrue(np.allclose(lowercase , lowercase ) ) | 282 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "longformer"
def __init__( self : Optional[Any] , lowercase : Union[List[int], int] = 512 , lowercase : int = 2 , lowercase : int = 1 , lowercase : int = 0 , lowercase : int = 2 , lowercase : int = 30_522 , lowercase : int = 768 , lowercase : int = 12 , lowercase : int = 12 , lowercase : int = 3_072 , lowercase : str = "gelu" , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : int = 512 , lowercase : int = 2 , lowercase : float = 0.02 , lowercase : float = 1E-12 , lowercase : bool = False , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase , **lowercase )
_snake_case = attention_window
_snake_case = sep_token_id
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = onnx_export
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : int , lowercase : "PretrainedConfig" , lowercase : str = "default" , lowercase : "List[PatchingSpec]" = None ):
'''simple docstring'''
super().__init__(lowercase , lowercase , lowercase )
_snake_case = True
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def A ( self : int ):
'''simple docstring'''
_snake_case = super().outputs
if self.task == "default":
_snake_case = {0: 'batch'}
return outputs
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[str] ):
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def A ( self : str , lowercase : "PreTrainedTokenizerBase" , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
_snake_case = super().generate_dummy_inputs(
preprocessor=lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_snake_case = torch.zeros_like(inputs['input_ids'] )
# make every second token global
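# (in the unobfuscated implementation this corresponds to
# inputs["global_attention_mask"][:, ::2] = 1 — an assumption inferred from the comment above)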
_snake_case = 1
return inputs | 282 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_lowerCamelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase : Dict = 256
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = ["melgan"]
def __init__( self : Dict , lowercase : SpectrogramNotesEncoder , lowercase : SpectrogramContEncoder , lowercase : TaFilmDecoder , lowercase : DDPMScheduler , lowercase : OnnxRuntimeModel if is_onnx_available() else Any , ):
'''simple docstring'''
super().__init__()
# From MELGAN
_snake_case = math.log(1E-5 ) # Matches MelGAN training.
_snake_case = 4.0 # Largest value for most examples
_snake_case = 128
self.register_modules(
notes_encoder=lowercase , continuous_encoder=lowercase , decoder=lowercase , scheduler=lowercase , melgan=lowercase , )
def A ( self : Any , lowercase : int , lowercase : Dict=(-1.0, 1.0) , lowercase : Dict=False ):
'''simple docstring'''
_snake_case , _snake_case = output_range
if clip:
_snake_case = torch.clip(lowercase , self.min_value , self.max_value )
# Scale to [0, 1].
_snake_case = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : str=(-1.0, 1.0) , lowercase : List[str]=False ):
'''simple docstring'''
_snake_case , _snake_case = input_range
_snake_case = torch.clip(lowercase , lowercase , lowercase ) if clip else outputs
# Scale to [0, 1].
_snake_case = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = input_tokens > 0
_snake_case , _snake_case = self.notes_encoder(
encoder_input_tokens=lowercase , encoder_inputs_mask=lowercase )
_snake_case , _snake_case = self.continuous_encoder(
encoder_inputs=lowercase , encoder_inputs_mask=lowercase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def A ( self : Optional[int] , lowercase : List[str] , lowercase : List[Any] , lowercase : List[str] ):
'''simple docstring'''
_snake_case = noise_time
if not torch.is_tensor(lowercase ):
_snake_case = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(lowercase ) and len(timesteps.shape ) == 0:
_snake_case = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
_snake_case = self.decoder(
encodings_and_masks=lowercase , decoder_input_tokens=lowercase , decoder_noise_time=lowercase )
return logits
@torch.no_grad()
def __call__( self : List[Any] , lowercase : List[List[int]] , lowercase : Optional[torch.Generator] = None , lowercase : int = 100 , lowercase : bool = True , lowercase : str = "numpy" , lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase : int = 1 , ):
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase , lowercase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(lowercase )}.''' )
_snake_case = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_snake_case = np.zeros([1, 0, self.n_dims] , np.floataa )
_snake_case = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase , device=self.device )
for i, encoder_input_tokens in enumerate(lowercase ):
if i == 0:
_snake_case = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_snake_case = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_snake_case = ones
_snake_case = self.scale_features(
lowercase , output_range=[-1.0, 1.0] , clip=lowercase )
_snake_case = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase , continuous_mask=lowercase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_snake_case = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=lowercase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(lowercase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_snake_case = self.decode(
encodings_and_masks=lowercase , input_tokens=lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_snake_case = self.scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_snake_case = self.scale_to_features(lowercase , input_range=[-1.0, 1.0] )
_snake_case = mel[:1]
_snake_case = mel.cpu().float().numpy()
_snake_case = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase , lowercase )
logger.info('Generated segment' , lowercase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
_snake_case = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_snake_case = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=lowercase ) | 282 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , lowercase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(lowercase )
def A ( self : Optional[int] , lowercase : torch.FloatTensor , lowercase : Union[torch.Tensor, float, int] , lowercase : torch.Tensor , lowercase : List[torch.tensor] , lowercase : List[float] , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[Dict[str, Any]] = None , lowercase : bool = False , lowercase : bool = True , ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowercase , lowercase , self.nets ) ):
_snake_case , _snake_case = controlnet(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# merge samples
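# (the first ControlNet's residuals seed the running totals; each later ControlNet's
# down-block and mid-block residuals are added elementwise, every call having received
# its own conditioning image and scale)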
if i == 0:
_snake_case , _snake_case = down_samples, mid_sample
else:
_snake_case = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase , lowercase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def A ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = True , lowercase : Callable = None , lowercase : bool = False , lowercase : Optional[str] = None , ):
'''simple docstring'''
_snake_case = 0
_snake_case = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase , is_main_process=lowercase , save_function=lowercase , safe_serialization=lowercase , variant=lowercase , )
idx += 1
_snake_case = model_path_to_save + f'''_{idx}'''
@classmethod
def A ( cls : Any , lowercase : Optional[Union[str, os.PathLike]] , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = 0
_snake_case = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case = pretrained_model_path
while os.path.isdir(lowercase ):
_snake_case = ControlNetModel.from_pretrained(lowercase , **lowercase )
controlnets.append(lowercase )
idx += 1
_snake_case = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(lowercase )} controlnets loaded from {pretrained_model_path}.''' )
if len(lowercase ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(lowercase )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(lowercase ) | 282 | 1 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_lowerCamelCase : List[Any] = '''
@article{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
author = {Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year = {2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_lowerCamelCase : Optional[Any] = '''\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
_lowerCamelCase : List[str] = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def a_ ( __lowercase : int ) -> int:
def remove_articles(__lowercase : Union[str, Any] ):
_snake_case = re.compile(r'\b(a|an|the)\b' , re.UNICODE )
return re.sub(__lowercase , ' ' , __lowercase )
def white_space_fix(__lowercase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(__lowercase : Any ):
_snake_case = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowercase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowercase ) ) ) )
def a_ ( __lowercase : List[str] , __lowercase : Any ) -> List[Any]:
return int(normalize_answer(__lowercase ) == normalize_answer(__lowercase ) )
def a_ ( __lowercase : List[str] , __lowercase : int ) -> Optional[Any]:
_snake_case = [any(compute_exact(__lowercase , __lowercase ) for ref in refs ) for pred, refs in zip(__lowercase , __lowercase )]
return (sum(__lowercase ) / len(__lowercase )) * 100
def a_ ( __lowercase : List[str] , __lowercase : Dict , __lowercase : int , __lowercase : int ) -> List[Any]:
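# Computes the SARI keep/delete/add scores for a single n-gram order, given the source
# n-grams, candidate n-grams, per-reference n-gram lists, and the number of references
# (the obfuscated parameters, in that order — an assumption based on the reference SARI
# implementation).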
_snake_case = [rgram for rgrams in rgramslist for rgram in rgrams]
_snake_case = Counter(__lowercase )
_snake_case = Counter(__lowercase )
_snake_case = Counter()
for sgram, scount in sgramcounter.items():
_snake_case = scount * numref
_snake_case = Counter(__lowercase )
_snake_case = Counter()
for cgram, ccount in cgramcounter.items():
_snake_case = ccount * numref
# KEEP
_snake_case = sgramcounter_rep & cgramcounter_rep
_snake_case = keepgramcounter_rep & rgramcounter
_snake_case = sgramcounter_rep & rgramcounter
_snake_case = 0
_snake_case = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_snake_case = 1
_snake_case = 1
if len(__lowercase ) > 0:
_snake_case = keeptmpscorea / len(__lowercase )
if len(__lowercase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_snake_case = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_snake_case = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_snake_case = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_snake_case = sgramcounter_rep - cgramcounter_rep
_snake_case = delgramcounter_rep - rgramcounter
_snake_case = sgramcounter_rep - rgramcounter
_snake_case = 0
_snake_case = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_snake_case = 1
if len(__lowercase ) > 0:
_snake_case = deltmpscorea / len(__lowercase )
# ADDITION
_snake_case = set(__lowercase ) - set(__lowercase )
_snake_case = set(__lowercase ) & set(__lowercase )
_snake_case = set(__lowercase ) - set(__lowercase )
_snake_case = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_snake_case = 1
_snake_case = 1
if len(__lowercase ) > 0:
_snake_case = addtmpscore / len(__lowercase )
if len(__lowercase ) > 0:
_snake_case = addtmpscore / len(__lowercase )
_snake_case = 0
if addscore_precision > 0 or addscore_recall > 0:
_snake_case = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def a_ ( __lowercase : List[str] , __lowercase : Dict , __lowercase : Tuple ) -> Optional[int]:
_snake_case = len(__lowercase )
_snake_case = ssent.split(' ' )
_snake_case = csent.split(' ' )
_snake_case = []
_snake_case = []
_snake_case = []
_snake_case = []
_snake_case = []
_snake_case = []
_snake_case = []
_snake_case = []
_snake_case = []
_snake_case = []
for rsent in rsents:
_snake_case = rsent.split(' ' )
_snake_case = []
_snake_case = []
_snake_case = []
ragramslist.append(__lowercase )
for i in range(0 , len(__lowercase ) - 1 ):
if i < len(__lowercase ) - 1:
_snake_case = ragrams[i] + ' ' + ragrams[i + 1]
ragrams.append(__lowercase )
if i < len(__lowercase ) - 2:
_snake_case = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
ragrams.append(__lowercase )
if i < len(__lowercase ) - 3:
_snake_case = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
ragrams.append(__lowercase )
ragramslist.append(__lowercase )
ragramslist.append(__lowercase )
ragramslist.append(__lowercase )
for i in range(0 , len(__lowercase ) - 1 ):
if i < len(__lowercase ) - 1:
_snake_case = sagrams[i] + ' ' + sagrams[i + 1]
sagrams.append(__lowercase )
if i < len(__lowercase ) - 2:
_snake_case = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
sagrams.append(__lowercase )
if i < len(__lowercase ) - 3:
_snake_case = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
sagrams.append(__lowercase )
for i in range(0 , len(__lowercase ) - 1 ):
if i < len(__lowercase ) - 1:
_snake_case = cagrams[i] + ' ' + cagrams[i + 1]
cagrams.append(__lowercase )
if i < len(__lowercase ) - 2:
_snake_case = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
cagrams.append(__lowercase )
if i < len(__lowercase ) - 3:
_snake_case = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
cagrams.append(__lowercase )
((_snake_case) , (_snake_case) , (_snake_case)) = SARIngram(__lowercase , __lowercase , __lowercase , __lowercase )
((_snake_case) , (_snake_case) , (_snake_case)) = SARIngram(__lowercase , __lowercase , __lowercase , __lowercase )
((_snake_case) , (_snake_case) , (_snake_case)) = SARIngram(__lowercase , __lowercase , __lowercase , __lowercase )
((_snake_case) , (_snake_case) , (_snake_case)) = SARIngram(__lowercase , __lowercase , __lowercase , __lowercase )
_snake_case = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_snake_case = sum([delascore, delascore, delascore, delascore] ) / 4
_snake_case = sum([addascore, addascore, addascore, addascore] ) / 4
_snake_case = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def a_ ( __lowercase : List[str] , __lowercase : bool = True , __lowercase : str = "13a" , __lowercase : bool = True ) -> Tuple:
# Normalization is required for the ASSET dataset (one of the primary
# datasets in sentence simplification) to allow using space
# to split the sentence. Even though the Wiki-Auto and TURK datasets
# do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
_snake_case = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_snake_case = sacrebleu.metrics.bleu._get_tokenizer(__lowercase )()(__lowercase )
else:
_snake_case = sacrebleu.TOKENIZERS[tokenizer]()(__lowercase )
elif tokenizer == "moses":
_snake_case = sacremoses.MosesTokenizer().tokenize(__lowercase , return_str=__lowercase , escape=__lowercase )
elif tokenizer == "penn":
_snake_case = sacremoses.MosesTokenizer().penn_tokenize(__lowercase , return_str=__lowercase )
else:
_snake_case = sentence
if not return_str:
_snake_case = normalized_sent.split()
return normalized_sent
def a_ ( __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : Tuple ) -> Dict:
if not (len(__lowercase ) == len(__lowercase ) == len(__lowercase )):
raise ValueError('Sources length must match predictions and references lengths.' )
_snake_case = 0
for src, pred, refs in zip(__lowercase , __lowercase , __lowercase ):
sari_score += SARIsent(normalize(__lowercase ) , normalize(__lowercase ) , [normalize(__lowercase ) for sent in refs] )
_snake_case = sari_score / len(__lowercase )
return 100 * sari_score
def a_ ( __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : Tuple="exp" , __lowercase : Union[str, Any]=None , __lowercase : Optional[Any]=False , __lowercase : Any=False , __lowercase : Tuple=False , ) -> List[str]:
_snake_case = len(references[0] )
if any(len(__lowercase ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
_snake_case = [[refs[i] for refs in references] for i in range(__lowercase )]
_snake_case = sacrebleu.corpus_bleu(
__lowercase , __lowercase , smooth_method=__lowercase , smooth_value=__lowercase , force=__lowercase , lowercase=__lowercase , use_effective_order=__lowercase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Union[str, Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def A ( self : Any , lowercase : List[str] , lowercase : Optional[int] , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = {}
result.update({'sari': compute_sari(sources=lowercase , predictions=lowercase , references=lowercase )} )
result.update({'sacrebleu': compute_sacrebleu(predictions=lowercase , references=lowercase )} )
result.update({'exact': compute_em(predictions=lowercase , references=lowercase )} )
return result | 282 |
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : List[str] , lowercase : list[int] ):
'''simple docstring'''
_snake_case = len(lowercase )
_snake_case = [0] * len_array
if len_array > 0:
_snake_case = array[0]
for i in range(1 , lowercase ):
_snake_case = self.prefix_sum[i - 1] + array[i]
def A ( self : Optional[Any] , lowercase : int , lowercase : int ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def A ( self : Union[str, Any] , lowercase : int ):
'''simple docstring'''
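# A contiguous subarray sums to target_sum iff prefix_sum[j] - prefix_sum[i] == target_sum
# for some i < j; so scan the prefix sums and check whether (current - target) was already
# seen (the initial 0 covers subarrays starting at index 0).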
_snake_case = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowercase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
import argparse
import struct
import unittest
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : bytes ):
'''simple docstring'''
_snake_case = data
# Initialize hash values
_snake_case = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
_snake_case = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
_snake_case = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def A ( lowercase : bytes ):
'''simple docstring'''
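# Standard SHA-256 padding: append a single 0x80 byte, zero-pad until the length is
# 56 (mod 64), then append the original message length in bits as a 64-bit big-endian
# integer, making the total a multiple of 64 bytes.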
_snake_case = B'\x80' + (B'\x00' * (63 - (len(lowercase ) + 8) % 64))
_snake_case = struct.pack('>Q' , (len(lowercase ) * 8) )
return data + padding + big_endian_integer
def A ( self : str ):
'''simple docstring'''
_snake_case = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_snake_case = list(struct.unpack('>16L' , lowercase ) )
# add 48 0-ed integers
words += [0] * 48
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_snake_case = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
_snake_case = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
_snake_case = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100000000
# Compression
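# One SHA-256 compression round: the first rotation below is S1(e), `ch` is the choice
# function, and temp1 = h + S1 + ch + k[index] + w[index]; the second rotation is S0(a),
# `maj` the majority function, and temp2 = S0 + maj; the working variables are then rotated.
# (Names follow the FIPS 180-4 pseudocode; the obfuscation collapses them into _snake_case.)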
_snake_case = self.ror(lowercase , 6 ) ^ self.ror(lowercase , 11 ) ^ self.ror(lowercase , 25 )
_snake_case = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
_snake_case = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100000000
_snake_case = self.ror(lowercase , 2 ) ^ self.ror(lowercase , 13 ) ^ self.ror(lowercase , 22 )
_snake_case = (a & b) ^ (a & c) ^ (b & c)
_snake_case = (sa + maj) % 0x100000000
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = (
g,
f,
e,
((d + tempa) % 0x100000000),
c,
b,
a,
((tempa + tempa) % 0x100000000),
)
_snake_case = [a, b, c, d, e, f, g, h]
# Modify final values
_snake_case = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes )
]
_snake_case = ''.join([hex(lowercase )[2:].zfill(8 ) for value in self.hashes] )
def A ( self : List[str] , lowercase : int , lowercase : int ):
'''simple docstring'''
return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
import hashlib
_snake_case = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(lowercase ).hash , hashlib.shaaaa(lowercase ).hexdigest() )
def a_ ( ) -> None:
import doctest
doctest.testmod()
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_snake_case = parser.parse_args()
_snake_case = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_snake_case = f.read()
else:
_snake_case = bytes(__lowercase , 'utf-8' )
print(SHAaaa(__lowercase ).hash )
if __name__ == "__main__":
main() | 282 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int = 16 , lowercase : int = 88 , lowercase : Optional[int] = None , lowercase : int = 1 , lowercase : float = 0.0 , lowercase : int = 32 , lowercase : Optional[int] = None , lowercase : bool = False , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : str = "geglu" , lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase , attention_head_dim=lowercase , in_channels=lowercase , num_layers=lowercase , dropout=lowercase , norm_num_groups=lowercase , cross_attention_dim=lowercase , attention_bias=lowercase , sample_size=lowercase , num_vector_embeds=lowercase , activation_fn=lowercase , num_embeds_ada_norm=lowercase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_snake_case = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_snake_case = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_snake_case = [1, 0]
def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : List[str]=None , lowercase : Tuple=None , lowercase : Dict=None , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = hidden_states
_snake_case = []
_snake_case = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_snake_case = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_snake_case = self.transformer_index_for_condition[i]
_snake_case = self.transformers[transformer_index](
lowercase , encoder_hidden_states=lowercase , timestep=lowercase , cross_attention_kwargs=lowercase , return_dict=lowercase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_snake_case = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_snake_case = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase ) | 282 | 1 |
from __future__ import annotations
from typing import Any
def a_ ( postfix_notation : list ) -> int:
if not postfix_notation:
return 0
_snake_case = {'+', '-', '*', '/'}
_snake_case = []
for token in postfix_notation:
if token in operations:
b , a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(token ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 |
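# Quick usage checks for the evaluator above (the obfuscated name `a_` kept as-is);
# the last case exercises the sign-aware division branch, which truncates toward zero.
assert a_(["2", "1", "+", "3", "*"]) == 9   # (2 + 1) * 3
assert a_(["4", "13", "5", "/", "+"]) == 6  # 4 + 13/5 truncated toward zero
assert a_(["-7", "2", "/"]) == -3           # mixed signs take the a // b + 1 branch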
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoTokenizer.from_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = tokenizer('This is me' , return_tensors='pt' )
_snake_case = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_snake_case = model.generate(**lowercase )
_snake_case = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_snake_case = model_reloaded.generate(**lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase ) )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowercase ):
model.save_pretrained(lowercase )
_snake_case = model.reverse_bettertransformer()
model.save_pretrained(lowercase ) | 282 | 1 |
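# A minimal sketch of the round-trip the tests above exercise, assuming the
# `optimum` package is installed (it provides the BetterTransformer backend).
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "hf-internal-testing/tiny-random-t5"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

model = model.to_bettertransformer()        # swap in fused-attention modules
inputs = tokenizer("This is me", return_tensors="pt")
generated = model.generate(**inputs)

model = model.reverse_bettertransformer()   # restore canonical modules before saving
model.save_pretrained("./t5-roundtrip")
print(tokenizer.batch_decode(generated, skip_special_tokens=True))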
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = (IPNDMScheduler,)
_UpperCAmelCase : str = (("num_inference_steps", 5_0),)
def A ( self : Tuple , **lowercase : Any ):
'''simple docstring'''
_snake_case = {'num_train_timesteps': 1_000}
config.update(**lowercase )
return config
def A ( self : Optional[int] , time_step : List[Any]=0 , **kwargs : List[str] ):
'''simple docstring'''
_snake_case = dict(self.forward_default_kwargs )
_snake_case = kwargs.pop('num_inference_steps' , lowercase )
_snake_case = self.dummy_sample
_snake_case = 0.1 * sample
_snake_case = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case = self.get_scheduler_config(**lowercase )
_snake_case = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
_snake_case = dummy_past_residuals[:]
if time_step is None:
_snake_case = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
_snake_case = scheduler_class.from_pretrained(lowercase )
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
_snake_case = dummy_past_residuals[:]
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_snake_case = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_snake_case = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : int ):
'''simple docstring'''
pass
def A ( self : Tuple , time_step : Tuple=0 , **kwargs : Optional[Any] ):
'''simple docstring'''
_snake_case = dict(self.forward_default_kwargs )
_snake_case = kwargs.pop('num_inference_steps' , lowercase )
_snake_case = self.dummy_sample
_snake_case = 0.1 * sample
_snake_case = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case = dummy_past_residuals[:]
if time_step is None:
_snake_case = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
_snake_case = scheduler_class.from_pretrained(lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residual (must be after setting timesteps)
_snake_case = dummy_past_residuals[:]
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_snake_case = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_snake_case = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : List[Any] , **lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config(**lowercase )
_snake_case = scheduler_class(**lowercase )
_snake_case = 10
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
_snake_case = model(lowercase , lowercase )
_snake_case = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_snake_case = model(lowercase , lowercase )
_snake_case = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
return sample
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = dict(self.forward_default_kwargs )
_snake_case = kwargs.pop('num_inference_steps' , lowercase )
for scheduler_class in self.scheduler_classes:
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**lowercase )
_snake_case = self.dummy_sample
_snake_case = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase , 'set_timesteps' ):
scheduler.set_timesteps(lowercase )
elif num_inference_steps is not None and not hasattr(lowercase , 'set_timesteps' ):
_snake_case = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_snake_case = dummy_past_residuals[:]
_snake_case = scheduler.timesteps[5]
_snake_case = scheduler.timesteps[6]
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_snake_case = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A ( self : List[Any] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase , time_step=lowercase )
def A ( self : int ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase , time_step=lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.full_loop()
_snake_case = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 2_540_529 ) < 10 | 282 |
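# Outside the test harness, the scheduler is driven by the same loop `full_loop`
# builds; a sketch with a stand-in for the denoising model.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = 0.1 * sample  # placeholder for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample
print(sample.shape)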
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCamelCase : List[Any] = HfApi()
_lowerCamelCase : Dict = {}
# fmt: off
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
_lowerCamelCase : List[str] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCamelCase : Any = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
_lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
_lowerCamelCase : int = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCamelCase : Union[str, Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCamelCase : int = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCamelCase : int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'{mod.modelId} has passed successfully!!!') | 282 | 1 |
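# The pass/fail criterion in the script above reduces to comparing a logits slice
# against a stored reference tensor; the pattern in isolation (values illustrative):
import torch

logits = torch.randn(1, 3, 256, 256)
reference = logits[0, 0, 0, :30].clone() + 1E-4  # stands in for a stored results[...] row
assert torch.allclose(logits[0, 0, 0, :30], reference, atol=1E-3)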
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error( example_no , data_set="train" ):
return calculate_hypothesis_value(example_no , data_set ) - output(
example_no , data_set )
def _hypothesis_value( data_input_tuple ):
hyp_val = 0
for i in range(len(parameter_vector ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def output( example_no , data_set ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def calculate_hypothesis_value( example_no , data_set ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def summation_of_cost_derivative( index , end=m ):
summation_value = 0
for i in range(end ):
if index == -1:
summation_value += _error(i )
else:
summation_value += _error(i ) * train_data[i][0][index]
return summation_value
def get_cost_derivative( index ):
cost_derivative_value = summation_of_cost_derivative(index , m ) / m
return cost_derivative_value
def run_gradient_descent():
global parameter_vector
# Tune these values to set a tolerance value for predicted output
absolute_error_limit = 0.000002
relative_error_limit = 0
j = 0
while True:
j += 1
temp_parameter_vector = [0, 0, 0, 0]
for i in range(0 , len(parameter_vector ) ):
cost_derivative = get_cost_derivative(i - 1 )
temp_parameter_vector[i] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
break
parameter_vector = temp_parameter_vector
print(('Number of iterations:', j) )
def test_gradient_descent():
for i in range(len(test_data ) ):
print(('Actual output value:', output(i , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(i , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent() | 282 |
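# The same batch update in vectorized numpy form, as a sketch; it reuses
# train_data, parameter_vector, and LEARNING_RATE from the script above.
import numpy as np

X = np.array([[1, *features] for features, _ in train_data], dtype=float)  # bias column first
y = np.array([target for _, target in train_data], dtype=float)
theta = np.array(parameter_vector, dtype=float)

for iteration in range(100_000):
    gradient = (X @ theta - y) @ X / len(y)  # same per-parameter derivative as above
    new_theta = theta - LEARNING_RATE * gradient
    if np.allclose(theta, new_theta, atol=0.000002, rtol=0):
        break
    theta = new_theta
print(theta)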
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase , 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowercase , 'depth_multiplier' ) )
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : List[str] , lowercase : Dict=13 , lowercase : Optional[int]=3 , lowercase : Any=32 , lowercase : Any=0.25 , lowercase : Union[str, Any]=8 , lowercase : List[Any]=8 , lowercase : List[Any]=6 , lowercase : Dict=32 , lowercase : Dict=True , lowercase : Optional[Any]=True , lowercase : Tuple=True , lowercase : Tuple="relu6" , lowercase : List[Any]=1_280 , lowercase : Optional[Any]=0.1 , lowercase : int=0.02 , lowercase : Optional[Any]=True , lowercase : List[str]=True , lowercase : List[str]=10 , lowercase : Optional[Any]=None , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = depth_multiplier
_snake_case = depth_divisible_by
_snake_case = min_depth
_snake_case = expand_ratio
_snake_case = tf_padding
_snake_case = output_stride
_snake_case = first_layer_is_expansion
_snake_case = finegrained_output
_snake_case = hidden_act
_snake_case = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
_snake_case = classifier_dropout_prob
_snake_case = use_labels
_snake_case = is_training
_snake_case = num_labels
_snake_case = initializer_range
_snake_case = scope
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def A ( self : str ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A ( self : Optional[Any] , lowercase : str , lowercase : List[str] , lowercase : str , lowercase : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def A ( self : List[Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForImageClassification(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , lowercase : int , lowercase : Dict , lowercase : int , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForSemanticSegmentation(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A ( self : str ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : str = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase : str = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Union[str, Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = MobileNetVaModelTester(self )
_snake_case = MobileNetVaConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def A ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def A ( self : Any ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : str ):
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.hidden_states
_snake_case = 16
self.assertEqual(len(lowercase ) , lowercase )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase )
@slow
def A ( self : List[Any] ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = MobileNetVaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def a_ ( ) -> Union[str, Any]:
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A ( self : Optional[Any] ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(lowercase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
# verify the logits
_snake_case = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowercase )
_snake_case = torch.tensor([0.2445, -1.1993, 0.1905] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
@slow
def A ( self : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = model.to(lowercase )
_snake_case = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
_snake_case = outputs.logits
# verify the logits
_snake_case = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , lowercase )
_snake_case = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase , atol=1E-4 ) ) | 282 | 1 |
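# A standalone inference sketch mirroring the integration test above; the
# un-obfuscated MobileNetV2 names are the actual transformers classes.
import torch
from PIL import Image
from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])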
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
_snake_case = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
_snake_case = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
_snake_case = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
_snake_case = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
if split_mlp_wi:
_snake_case = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
_snake_case = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
_snake_case = (wi_a, wi_a)
else:
_snake_case = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
_snake_case = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def convert_tax_to_pytorch( variables , * , num_layers , is_encoder_only ):
old = traverse_util.flatten_dict(variables['target'] )
old = {'/'.join(k ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
split_mlp_wi = 'encoder/layers_0/mlp/wi_0/kernel' in old
print('Split MLP:' , split_mlp_wi )
_snake_case = collections.OrderedDict()
# Shared embeddings.
_snake_case = old['token_embedder/embedding']
# Encoder.
for i in range(__lowercase ):
# Block i, layer 0 (Self Attention).
_snake_case = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_attention_layer_norm' )
_snake_case , _snake_case , _snake_case , _snake_case = tax_attention_lookup(__lowercase , __lowercase , 'encoder' , 'attention' )
_snake_case = layer_norm
_snake_case = k.T
_snake_case = o.T
_snake_case = q.T
_snake_case = v.T
# Block i, layer 1 (MLP).
_snake_case = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_mlp_layer_norm' )
_snake_case , _snake_case = tax_mlp_lookup(__lowercase , __lowercase , 'encoder' , __lowercase )
_snake_case = layer_norm
if split_mlp_wi:
_snake_case = wi[0].T
_snake_case = wi[1].T
else:
_snake_case = wi.T
_snake_case = wo.T
_snake_case = old[
'encoder/relpos_bias/rel_embedding'
].T
_snake_case = old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(__lowercase ):
# Block i, layer 0 (Self Attention).
_snake_case = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_self_attention_layer_norm' )
_snake_case , _snake_case , _snake_case , _snake_case = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'self_attention' )
_snake_case = layer_norm
_snake_case = k.T
_snake_case = o.T
_snake_case = q.T
_snake_case = v.T
# Block i, layer 1 (Cross Attention).
_snake_case = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_cross_attention_layer_norm' )
_snake_case , _snake_case , _snake_case , _snake_case = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'encoder_decoder_attention' )
_snake_case = layer_norm
_snake_case = k.T
_snake_case = o.T
_snake_case = q.T
_snake_case = v.T
# Block i, layer 2 (MLP).
_snake_case = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_mlp_layer_norm' )
_snake_case , _snake_case = tax_mlp_lookup(__lowercase , __lowercase , 'decoder' , __lowercase )
_snake_case = layer_norm
if split_mlp_wi:
_snake_case = wi[0].T
_snake_case = wi[1].T
else:
_snake_case = wi.T
_snake_case = wo.T
_snake_case = old['decoder/decoder_norm/scale']
_snake_case = old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_snake_case = old['decoder/logits_dense/kernel'].T
return new
def make_state_dict( converted_params , is_encoder_only ):
_snake_case = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_snake_case = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_snake_case = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
_snake_case = state_dict['shared.weight']
return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only ):
_snake_case = checkpoints.load_tax_checkpoint(__lowercase )
_snake_case = convert_tax_to_pytorch(__lowercase , num_layers=config.num_layers , is_encoder_only=__lowercase )
_snake_case = make_state_dict(__lowercase , __lowercase )
model.load_state_dict(__lowercase , strict=__lowercase )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only=False ):
_snake_case = TaConfig.from_json_file(__lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_snake_case = TaEncoderModel(__lowercase )
else:
_snake_case = TaForConditionalGeneration(__lowercase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__lowercase , __lowercase , __lowercase , __lowercase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(__lowercase )
# Verify that we can load the checkpoint.
model.from_pretrained(__lowercase )
print('Done' )
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
_lowerCamelCase : Dict = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
) | 282 |
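# All of the lookups above follow one pattern: flatten the nested T5X parameter
# tree into '/'-joined keys, then index by a string template. A sketch on a
# plain dict; the `lookup` helper is illustrative.
from flax import traverse_util

def lookup(variables: dict, template: str, **fields):
    flat = {"/".join(k): v for k, v in traverse_util.flatten_dict(variables).items()}
    return flat[template.format(**fields)]

params = {"encoder": {"layers_0": {"attention": {"query": {"kernel": "Wq"}}}}}
print(lookup(params, "encoder/layers_{i}/attention/query/kernel", i=0))  # -> Wq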
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
torch_layer.weight = nn.Parameter(weight )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ):
# set torch weights for 1-to-1 comparison
np_query_key = np.asarray(weights[0] )
np_value = np.asarray(weights[1] )
np_dense = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ):
# set torch weights for 1-to-1 comparison
np_query = np.asarray(weights[0] )
np_key = np.asarray(weights[1] )
np_value = np.asarray(weights[2] )
np_dense = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
set_param(
torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch( weights , torch_block , hidden_size ):
# layernorm 1
_snake_case = weights[0][0][0]
_snake_case = np.asarray(layer_norm_a[0] )
_snake_case = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# lsh weights + output
_snake_case = weights[0][1]
if len(__lowercase ) < 4:
set_layer_weights_in_torch_lsh(__lowercase , torch_block.attention , __lowercase )
else:
set_layer_weights_in_torch_local(__lowercase , torch_block.attention , __lowercase )
# intermediate weighs
_snake_case = weights[2][0][1][2]
# Chunked Feed Forward
if len(__lowercase ) == 4:
_snake_case = intermediate_weights[2]
# layernorm 2
_snake_case = np.asarray(intermediate_weights[0][0] )
_snake_case = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# intermediate dense
_snake_case = np.asarray(intermediate_weights[1][0] )
_snake_case = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
# intermediate out
_snake_case = np.asarray(intermediate_weights[4][0] )
_snake_case = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size ):
# reformer model
_snake_case = torch_model.reformer
# word embeds
_snake_case = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowercase ) , )
if isinstance(weights[3] , tuple ):
_snake_case = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_snake_case = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
_snake_case = nn.Parameter(torch.tensor(__lowercase ) )
_snake_case = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__lowercase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_snake_case = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__lowercase , __lowercase , __lowercase )
# output layer norm
_snake_case = np.asarray(weights[7][0] )
_snake_case = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# output embeddings
_snake_case = np.asarray(weights[9][0] )
_snake_case = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ):
# Initialise PyTorch model
_snake_case = ReformerConfig.from_json_file(__lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
_snake_case = ReformerModelWithLMHead(__lowercase )
with open(__lowercase , 'rb' ) as f:
_snake_case = pickle.load(__lowercase )['weights']
set_model_weights_in_torch(__lowercase , __lowercase , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 282 | 1 |
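# Every conversion step above funnels through set_param: shape-check an array
# against a module parameter, then install it as an nn.Parameter. A standalone
# sketch; the `copy_weight` name is illustrative.
import numpy as np
import torch
from torch import nn

def copy_weight(layer: nn.Linear, weight: np.ndarray) -> None:
    assert layer.weight.shape == weight.shape, "layer.weight does not match"
    layer.weight = nn.Parameter(torch.tensor(weight))

layer = nn.Linear(4, 4, bias=False)
copy_weight(layer, np.eye(4, dtype=np.float32))
print(layer.weight.sum().item())  # 4.0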
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Optional[Union[str, Path]] = None
_UpperCAmelCase : bool = False
_UpperCAmelCase : bool = False
_UpperCAmelCase : bool = False
_UpperCAmelCase : Optional[Dict] = None
_UpperCAmelCase : Optional[str] = None
_UpperCAmelCase : bool = False
_UpperCAmelCase : bool = False
_UpperCAmelCase : bool = False
_UpperCAmelCase : bool = True
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : int = 1
_UpperCAmelCase : Optional[Union[str, bool]] = None
_UpperCAmelCase : bool = False
_UpperCAmelCase : Optional[Dict] = None
_UpperCAmelCase : Optional[str] = None
def A ( self : int ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} ) | 282 |
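# The copy method above is the usual deep-clone idiom for configuration
# dataclasses; the same pattern on an ordinary dataclass (names illustrative):
import copy
from dataclasses import dataclass, field

@dataclass
class RunConfig:
    output_dir: str = "out"
    tags: dict = field(default_factory=dict)

    def clone(self) -> "RunConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

a = RunConfig()
b = a.clone()
b.tags["x"] = 1
print(a.tags, b.tags)  # {} {'x': 1} -- the deep copy keeps the two independent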
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ):
_snake_case = args.pruning_method
_snake_case = args.threshold
_snake_case = args.model_name_or_path.rstrip('/' )
_snake_case = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_snake_case = torch.load(os.path.join(__lowercase , 'pytorch_model.bin' ) )
_snake_case = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_snake_case = MagnitudeBinarizer.apply(inputs=__lowercase , threshold=__lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = TopKBinarizer.apply(__lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = ThresholdBinarizer.apply(__lowercase , __lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case , _snake_case = -0.1, 1.1
_snake_case = torch.sigmoid(__lowercase )
_snake_case = s * (r - l) + l
_snake_case = s_bar.clamp(min=0.0 , max=1.0 )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_snake_case = os.path.join(
os.path.dirname(__lowercase ) , f'''bertarized_{os.path.basename(__lowercase )}''' )
if not os.path.isdir(__lowercase ):
shutil.copytree(__lowercase , __lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(__lowercase , os.path.join(__lowercase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_lowerCamelCase : int = parser.parse_args()
main(args) | 282 | 1 |
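# The topK branch above delegates masking to emmental's TopKBinarizer; the effect
# can be sketched in plain torch by keeping the largest `threshold` fraction of
# score magnitudes (the `topk_mask` helper is illustrative, not the emmental API).
import torch

def topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
    k = max(1, int(threshold * scores.numel()))
    cutoff = scores.abs().flatten().kthvalue(scores.numel() - k + 1).values
    return (scores.abs() >= cutoff).to(scores.dtype)

weights = torch.randn(4, 4)
mask = topk_mask(weights, threshold=0.25)
print((weights * mask).count_nonzero().item())  # about 4 of 16 weights survive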
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Any = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
_lowerCamelCase : Optional[int] = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
_lowerCamelCase : Optional[Any] = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
_UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Tuple = RealmTokenizer
def __init__( self : Optional[Any] , lowercase : str=None , lowercase : List[str]=None , lowercase : Union[str, Any]=True , lowercase : str="[UNK]" , lowercase : List[Any]="[SEP]" , lowercase : Optional[Any]="[PAD]" , lowercase : Optional[int]="[CLS]" , lowercase : Any="[MASK]" , lowercase : Any=True , lowercase : Union[str, Any]=None , **lowercase : int , ):
'''simple docstring'''
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
_snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowercase ) != tokenize_chinese_chars
):
_snake_case = getattr(lowercase , normalizer_state.pop('type' ) )
_snake_case = do_lower_case
_snake_case = strip_accents
_snake_case = tokenize_chinese_chars
_snake_case = normalizer_class(**lowercase )
_snake_case = do_lower_case
def A ( self : Optional[int] , text : int , **kwargs : Tuple ):
'''simple docstring'''
_snake_case = PaddingStrategy.MAX_LENGTH
_snake_case = text
_snake_case = kwargs.pop('text_pair' , lowercase )
_snake_case = kwargs.pop('return_tensors' , lowercase )
_snake_case = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(lowercase ):
if batch_text_pair is not None:
_snake_case = batch_text_pair[idx]
else:
_snake_case = None
_snake_case = super().__call__(lowercase , lowercase , return_tensors=lowercase , **lowercase )
_snake_case = encoded_candidates.get('input_ids' )
_snake_case = encoded_candidates.get('attention_mask' )
_snake_case = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(lowercase )
_snake_case = {key: item for key, item in output_data.items() if len(lowercase ) != 0}
return BatchEncoding(lowercase , tensor_type=lowercase )
def A ( self : str , token_ids_a : List[Any] , token_ids_b : int=None ):
'''simple docstring'''
output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def A ( self : Any , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def A ( self : Any , save_directory : str , filename_prefix : Optional[str] = None ):
'''simple docstring'''
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files ) | 282 |
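# The overridden __call__ above encodes a batch of candidate lists one list at a
# time, padding each to max length so the per-example tensors can be stacked.
# Sketched with a generic BERT-style tokenizer (model id illustrative).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
batch_of_candidates = [["first candidate", "second candidate"], ["third", "fourth"]]
encoded = [
    tokenizer(candidates, padding="max_length", max_length=16, return_tensors="pt")
    for candidates in batch_of_candidates
]
print(encoded[0]["input_ids"].shape)  # (num_candidates, max_length) per example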
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@property
def A ( self : List[str] ):
'''simple docstring'''
return self.get_dummy_input()
@property
def A ( self : Any ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def A ( self : Union[str, Any] , lowercase : Any=True , lowercase : List[Any]=False , lowercase : List[str]=False , lowercase : Dict=False , ):
'''simple docstring'''
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowercase )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowercase , generator=lowercase , device=lowercase )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowercase , device=lowercase )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowercase , generator=lowercase , device=lowercase ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowercase )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowercase , device=lowercase )
return dummy_input
def A ( self : Any ):
'''simple docstring'''
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Dict , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
unet_block.to(lowercase )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowercase ).to(lowercase )
assert torch_all_close(output_slice.flatten() , lowercase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def A ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
model.to(lowercase )
model.train()
_snake_case = model(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
_snake_case = torch.device(lowercase )
_snake_case = randn_tensor(output.shape , device=lowercase )
_snake_case = torch.nn.functional.mse_loss(lowercase , lowercase )
loss.backward() | 282 | 1 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int = 16 , lowercase : int = 88 , lowercase : Optional[int] = None , lowercase : int = 1 , lowercase : float = 0.0 , lowercase : int = 32 , lowercase : Optional[int] = None , lowercase : bool = False , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : str = "geglu" , lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase , attention_head_dim=lowercase , in_channels=lowercase , num_layers=lowercase , dropout=lowercase , norm_num_groups=lowercase , cross_attention_dim=lowercase , attention_bias=lowercase , sample_size=lowercase , num_vector_embeds=lowercase , activation_fn=lowercase , num_embeds_ada_norm=lowercase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_snake_case = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_snake_case = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_snake_case = [1, 0]
def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : List[str]=None , lowercase : Tuple=None , lowercase : Dict=None , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = hidden_states
_snake_case = []
_snake_case = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_snake_case = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_snake_case = self.transformer_index_for_condition[i]
_snake_case = self.transformers[transformer_index](
lowercase , encoder_hidden_states=lowercase , timestep=lowercase , cross_attention_kwargs=lowercase , return_dict=lowercase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_snake_case = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_snake_case = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase ) | 282 |
_lowerCamelCase : int = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : List[str] = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def a_ ( __lowercase : int , __lowercase : int , __lowercase : int ) -> str:
assert len(str(__lowercase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm (John Conway): derive the century anchor, then the year's doomsday, then offset from the month's doomsday date.
_snake_case = year // 100
_snake_case = (5 * (century % 4) + 2) % 7
_snake_case = year % 100
_snake_case = centurian % 12
_snake_case = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_snake_case = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_snake_case = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
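# Hedged worked example (date chosen for illustration, assuming the intended
# (year, month, day) signature): for 2012-05-12 the century anchor is 2, the
# year's doomsday is (1 + 0 + 0 + 2) % 7 == 3, May's leap-year anchor is
# DOOMSDAY_LEAP[4] == 2, and (3 + 12 - 2) % 7 == 6 maps to 'Saturday'.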
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
import operator as op
_lowerCamelCase : Optional[int] = '''scaler.pt'''
_lowerCamelCase : Optional[int] = '''pytorch_model'''
_lowerCamelCase : List[Any] = '''random_states'''
_lowerCamelCase : int = '''optimizer'''
_lowerCamelCase : Dict = '''scheduler'''
_lowerCamelCase : Optional[int] = '''pytorch_model.bin'''
_lowerCamelCase : Any = '''pytorch_model.bin.index.json'''
_lowerCamelCase : Optional[Any] = '''model.safetensors'''
_lowerCamelCase : Optional[Any] = '''model.safetensors.index.json'''
_lowerCamelCase : Optional[Any] = '''1.10.2'''
_lowerCamelCase : List[str] = '''py38'''
_lowerCamelCase : str = '''4.17.0'''
_lowerCamelCase : List[str] = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
_lowerCamelCase : Optional[Any] = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
_lowerCamelCase : Dict = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
_lowerCamelCase : List[Any] = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
_lowerCamelCase : List[str] = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
_lowerCamelCase : List[str] = '''2.0.1'''
_lowerCamelCase : str = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
_lowerCamelCase : int = ['''default''', '''reduce-overhead''', '''max-autotune''']
_lowerCamelCase : Optional[int] = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_lowerCamelCase : Optional[int] = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
_lowerCamelCase : str = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
_lowerCamelCase : Dict = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP'''] | 282 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Union[str, Any] , lowercase : Optional[int]=32 ):
'''simple docstring'''
set_seed(0 )
_snake_case = UNetaDModel(sample_size=lowercase , in_channels=3 , out_channels=3 )
_snake_case = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_snake_case = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
_snake_case = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
_snake_case = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randn((4, 3, 32, 32) ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randint(0 , 1_000 , (4,) ).long().to(lowercase ) for _ in range(4 )]
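# Hedged note: DDPM and DDIM implement the same forward-diffusion `add_noise`
# formula, so with shared seeds both loops below see identical noisy inputs;
# the closing allclose checks assert the two training runs stay in agreement.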
# train with a DDPM scheduler
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) ) | 282 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCamelCase : Dict = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 282 |
import numpy as np
def a_ ( __lowercase : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
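# Hedged example (illustrative input): for np.array([0.0]) this returns
# array([0.5]), since 1 / (1 + exp(-0)) == 0.5.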
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , lowercase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(lowercase )
def A ( self : Optional[int] , lowercase : torch.FloatTensor , lowercase : Union[torch.Tensor, float, int] , lowercase : torch.Tensor , lowercase : List[torch.tensor] , lowercase : List[float] , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[Dict[str, Any]] = None , lowercase : bool = False , lowercase : bool = True , ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowercase , lowercase , self.nets ) ):
_snake_case , _snake_case = controlnet(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# merge samples
if i == 0:
_snake_case , _snake_case = down_samples, mid_sample
else:
_snake_case = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase , lowercase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
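# Hedged illustration of the merge above (equal residual shapes assumed): with two
# controlnets the first iteration seeds the running residuals and later iterations
# add elementwise, so merged_mid = mid_1 + mid_2 and merged_down[i] = down_1[i] +
# down_2[i], each term already scaled inside its controlnet call.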
def A ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = True , lowercase : Callable = None , lowercase : bool = False , lowercase : Optional[str] = None , ):
'''simple docstring'''
_snake_case = 0
_snake_case = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase , is_main_process=lowercase , save_function=lowercase , safe_serialization=lowercase , variant=lowercase , )
idx += 1
_snake_case = model_path_to_save + f'''_{idx}'''
@classmethod
def A ( cls : Any , lowercase : Optional[Union[str, os.PathLike]] , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = 0
_snake_case = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case = pretrained_model_path
while os.path.isdir(lowercase ):
_snake_case = ControlNetModel.from_pretrained(lowercase , **lowercase )
controlnets.append(lowercase )
idx += 1
_snake_case = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(lowercase )} controlnets loaded from {pretrained_model_path}.''' )
if len(lowercase ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(lowercase )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(lowercase ) | 282 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : int ):
'''simple docstring'''
_snake_case = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_snake_case = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_snake_case = 'The dog is cute and lives in the garden house'
_snake_case = jnp.array([tokenizer.encode(lowercase )] )
_snake_case = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_snake_case = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
_snake_case = model(lowercase )['last_hidden_state']
self.assertEqual(output.shape , lowercase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , lowercase , atol=1E-3 ) ) | 282 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def a_ ( __lowercase : List[Any]=None , __lowercase : List[Any]=None ) -> Tuple:
return field(default_factory=lambda: default , metadata=__lowercase )
@dataclass
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : str = field(
metadata={"help": "The csv file to plot."} ,)
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} ,)
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} ,)
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={"help": "Disable logarithmic scale when plotting"} ,)
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} ,)
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} ,)
_UpperCAmelCase : Optional[List[str]] = list_field(
default=UpperCAmelCase ,metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def a_ ( __lowercase : Tuple ) -> Optional[int]:
try:
int(__lowercase )
return True
except ValueError:
return False
def a_ ( __lowercase : Tuple ) -> List[str]:
try:
float(__lowercase )
return True
except ValueError:
return False
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : Dict ):
'''simple docstring'''
_snake_case = args
_snake_case = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
_snake_case = csv.DictReader(lowercase )
for row in reader:
_snake_case = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
_snake_case = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
_snake_case = float(row['result'] )
def A ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case = plt.subplots()
_snake_case = 'Time usage' if self.args.is_time else 'Memory usage'
_snake_case = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithmic scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_snake_case = sorted(set(self.result_dict[model_name]['bsz'] ) )
_snake_case = sorted(set(self.result_dict[model_name]['seq_len'] ) )
_snake_case = self.result_dict[model_name]['result']
((_snake_case) , (_snake_case)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_snake_case = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_snake_case = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowercase , )
else:
_snake_case = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_snake_case) , (_snake_case)) = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
_snake_case = np.asarray(lowercase , lowercase )[: len(lowercase )]
plt.scatter(
lowercase , lowercase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(lowercase , lowercase , '--' )
title_str += f''' {label_model_name} vs.'''
_snake_case = title_str[:-4]
_snake_case = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(lowercase )
plt.xlabel(lowercase )
plt.ylabel(lowercase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def a_ ( ) -> Dict:
_snake_case = HfArgumentParser(__lowercase )
_snake_case = parser.parse_args_into_dataclasses()[0]
_snake_case = Plot(args=__lowercase )
plot.plot()
if __name__ == "__main__":
main() | 282 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = '''▁'''
_lowerCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Any = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
_lowerCamelCase : Optional[int] = {
'''google/pegasus-xsum''': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Any = PegasusTokenizer
_UpperCAmelCase : Dict = ["input_ids", "attention_mask"]
def __init__( self : Tuple , lowercase : str=None , lowercase : Any=None , lowercase : List[Any]="<pad>" , lowercase : List[Any]="</s>" , lowercase : Tuple="<unk>" , lowercase : Any="<mask_2>" , lowercase : List[str]="<mask_1>" , lowercase : List[Any]=None , lowercase : Dict=103 , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowercase , lowercase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowercase )}, but is'''
f''' {type(lowercase )}''' )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowercase ) , self.offset - 1 )
]
if len(set(lowercase ) ) != len(lowercase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowercase , tokenizer_file=lowercase , pad_token=lowercase , eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , **lowercase , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
def A ( self : List[str] , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def A ( self : List[Any] , lowercase : List , lowercase : Optional[List] = None , lowercase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowercase )
elif token_ids_a is None:
return self._special_token_mask(lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def A ( self : Any , lowercase : Tuple , lowercase : Any=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A ( self : int , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,) | 282 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : int = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 282 |
from collections.abc import Sequence
def a_ ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(__lowercase ) )
def a_ ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
_snake_case = 0.0
for coeff in reversed(__lowercase ):
_snake_case = result * x + coeff
return result
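# Hedged worked example (coefficients chosen for illustration): for
# p(x) = 1 + 2x + 3x^2 at x = 2, Horner's scheme folds right to left:
# 0*2 + 3 = 3, then 3*2 + 2 = 8, then 8*2 + 1 = 17, matching 1 + 4 + 12.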
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowerCamelCase : Optional[int] = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x)) | 282 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {'''vocab_file''': '''vocab.txt'''}
_lowerCamelCase : Optional[int] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
_lowerCamelCase : Any = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
_lowerCamelCase : Optional[Any] = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[str] = ConvBertTokenizer
def __init__( self : int , lowercase : str=None , lowercase : Optional[int]=None , lowercase : int=True , lowercase : int="[UNK]" , lowercase : Dict="[SEP]" , lowercase : str="[PAD]" , lowercase : Tuple="[CLS]" , lowercase : List[Any]="[MASK]" , lowercase : str=True , lowercase : Union[str, Any]=None , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
_snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowercase ) != tokenize_chinese_chars
):
_snake_case = getattr(lowercase , normalizer_state.pop('type' ) )
_snake_case = do_lower_case
_snake_case = strip_accents
_snake_case = tokenize_chinese_chars
_snake_case = normalizer_class(**lowercase )
_snake_case = do_lower_case
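# Hedged note: the block above re-instantiates the backend normalizer only when
# the options serialized in tokenizer.json disagree with the arguments passed
# here, keeping lowercasing/accent handling consistent with the request.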
def A ( self : Tuple , lowercase : str , lowercase : Optional[int]=None ):
'''simple docstring'''
_snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A ( self : int , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : List[str] , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
_snake_case = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase ) | 282 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : str , lowercase : List[str]=13 , lowercase : Any=7 , lowercase : Dict=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : Any=True , lowercase : Tuple=99 , lowercase : str=24 , lowercase : str=2 , lowercase : Any=6 , lowercase : Dict=37 , lowercase : List[str]="gelu" , lowercase : Dict=0.1 , lowercase : Tuple=0.1 , lowercase : Optional[Any]=512 , lowercase : List[Any]=16 , lowercase : str=2 , lowercase : int=0.02 , lowercase : List[Any]=3 , lowercase : List[Any]=None , lowercase : int=1_000 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = scope
_snake_case = range_bbox
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_snake_case = bbox[i, j, 3]
_snake_case = bbox[i, j, 1]
_snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_snake_case = bbox[i, j, 2]
_snake_case = bbox[i, j, 0]
_snake_case = t
_snake_case = None
if self.use_input_mask:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A ( self : List[str] ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A ( self : str , lowercase : Tuple , lowercase : Tuple , lowercase : str , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : str , ):
'''simple docstring'''
_snake_case = LiltModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : List[Any] , lowercase : int , lowercase : int , lowercase : Any , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Optional[int] , ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = LiltForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] , lowercase : int , lowercase : int , ):
'''simple docstring'''
_snake_case = LiltForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) = config_and_inputs
_snake_case = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : List[str] = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Union[str, Any] = False
def A ( self : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple ):
'''simple docstring'''
return True
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = LiltModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case = type
self.model_tester.create_and_check_model(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = LiltModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase )
_snake_case = torch.tensor([[1, 2]] , device=lowercase )
_snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(input_ids=lowercase , bbox=lowercase )
_snake_case = torch.Size([1, 2, 768] )
_snake_case = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowercase , )
self.assertEqual(outputs.last_hidden_state.shape , lowercase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase , atol=1E-3 ) ) | 282 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : str ):
'''simple docstring'''
_snake_case = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
_snake_case = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(lowercase ) , lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowercase ) , x.transpose() ) )
_snake_case = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
_snake_case = torch.tensor(lowercase )
self.assertTrue(np.allclose(transpose(lowercase ) , transpose(lowercase ).numpy() ) )
_snake_case = np.random.randn(3 , 4 , 5 )
_snake_case = torch.tensor(lowercase )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , transpose(lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A ( self : int ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
_snake_case = tf.constant(lowercase )
self.assertTrue(np.allclose(transpose(lowercase ) , transpose(lowercase ).numpy() ) )
_snake_case = np.random.randn(3 , 4 , 5 )
_snake_case = tf.constant(lowercase )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , transpose(lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A ( self : str ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
_snake_case = jnp.array(lowercase )
self.assertTrue(np.allclose(transpose(lowercase ) , np.asarray(transpose(lowercase ) ) ) )
_snake_case = np.random.randn(3 , 4 , 5 )
_snake_case = jnp.array(lowercase )
self.assertTrue(np.allclose(transpose(lowercase , axes=(1, 2, 0) ) , np.asarray(transpose(lowercase , axes=(1, 2, 0) ) ) ) )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , np.reshape(lowercase , (4, 3) ) ) )
_snake_case = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , np.reshape(lowercase , (12, 5) ) ) )
@require_torch
def A ( self : str ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
_snake_case = torch.tensor(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , reshape(lowercase , (4, 3) ).numpy() ) )
_snake_case = np.random.randn(3 , 4 , 5 )
_snake_case = torch.tensor(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , reshape(lowercase , (12, 5) ).numpy() ) )
@require_tf
def A ( self : Dict ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
_snake_case = tf.constant(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , reshape(lowercase , (4, 3) ).numpy() ) )
_snake_case = np.random.randn(3 , 4 , 5 )
_snake_case = tf.constant(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , reshape(lowercase , (12, 5) ).numpy() ) )
@require_flax
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
_snake_case = jnp.array(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (4, 3) ) , np.asarray(reshape(lowercase , (4, 3) ) ) ) )
_snake_case = np.random.randn(3 , 4 , 5 )
_snake_case = jnp.array(lowercase )
self.assertTrue(np.allclose(reshape(lowercase , (12, 5) ) , np.asarray(reshape(lowercase , (12, 5) ) ) ) )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowercase ) , np.squeeze(lowercase ) ) )
_snake_case = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , np.squeeze(lowercase , axis=2 ) ) )
@require_torch
def A ( self : Dict ):
'''simple docstring'''
_snake_case = np.random.randn(1 , 3 , 4 )
_snake_case = torch.tensor(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase ) , squeeze(lowercase ).numpy() ) )
_snake_case = np.random.randn(1 , 4 , 1 , 5 )
_snake_case = torch.tensor(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , squeeze(lowercase , axis=2 ).numpy() ) )
@require_tf
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = np.random.randn(1 , 3 , 4 )
_snake_case = tf.constant(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase ) , squeeze(lowercase ).numpy() ) )
_snake_case = np.random.randn(1 , 4 , 1 , 5 )
_snake_case = tf.constant(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , squeeze(lowercase , axis=2 ).numpy() ) )
@require_flax
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = np.random.randn(1 , 3 , 4 )
_snake_case = jnp.array(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase ) , np.asarray(squeeze(lowercase ) ) ) )
_snake_case = np.random.randn(1 , 4 , 1 , 5 )
_snake_case = jnp.array(lowercase )
self.assertTrue(np.allclose(squeeze(lowercase , axis=2 ) , np.asarray(squeeze(lowercase , axis=2 ) ) ) )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , np.expand_dims(lowercase , axis=1 ) ) )
@require_torch
def A ( self : Dict ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
_snake_case = torch.tensor(lowercase )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , expand_dims(lowercase , axis=1 ).numpy() ) )
@require_tf
def A ( self : Any ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
_snake_case = tf.constant(lowercase )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , expand_dims(lowercase , axis=1 ).numpy() ) )
@require_flax
def A ( self : Any ):
'''simple docstring'''
_snake_case = np.random.randn(3 , 4 )
_snake_case = jnp.array(lowercase )
self.assertTrue(np.allclose(expand_dims(lowercase , axis=1 ) , np.asarray(expand_dims(lowercase , axis=1 ) ) ) ) | 282 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def a_ ( __lowercase : Sequence[float] , __lowercase : int , __lowercase : int ) -> tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
_snake_case = (low + high) // 2
_snake_case , _snake_case , _snake_case = max_subarray(__lowercase , __lowercase , __lowercase )
_snake_case , _snake_case , _snake_case = max_subarray(__lowercase , mid + 1 , __lowercase )
_snake_case , _snake_case , _snake_case = max_cross_sum(__lowercase , __lowercase , __lowercase , __lowercase )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
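# Hedged worked example (array chosen for illustration): for
# [-2, 1, -3, 4, -1, 2, 1, -5, 4] the maximum subarray spans indices 3..6
# (elements 4, -1, 2, 1), so the expected result is (3, 6, 6).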
def a_ ( __lowercase : Sequence[float] , __lowercase : int , __lowercase : int , __lowercase : int ) -> tuple[int, int, float]:
_snake_case , _snake_case = float('-inf' ), -1
_snake_case , _snake_case = float('-inf' ), -1
_snake_case = 0
for i in range(__lowercase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_snake_case = summ
_snake_case = i
_snake_case = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_snake_case = summ
_snake_case = i
return max_left, max_right, (left_sum + right_sum)
def a_ ( __lowercase : int ) -> float:
_snake_case = [randint(1 , __lowercase ) for _ in range(__lowercase )]
_snake_case = time.time()
max_subarray(__lowercase , 0 , input_size - 1 )
_snake_case = time.time()
return end - start
def a_ ( ) -> None:
_snake_case = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
_snake_case = [time_max_subarray(__lowercase ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(__lowercase , __lowercase ):
print(__lowercase , '\t\t' , __lowercase )
plt.plot(__lowercase , __lowercase )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod() | 282 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = "bit"
_UpperCAmelCase : Optional[int] = ["preactivation", "bottleneck"]
_UpperCAmelCase : str = ["SAME", "VALID"]
def __init__( self : Optional[int] , lowercase : Dict=3 , lowercase : List[str]=64 , lowercase : Optional[int]=[256, 512, 1_024, 2_048] , lowercase : Any=[3, 4, 6, 3] , lowercase : str="preactivation" , lowercase : List[str]="relu" , lowercase : Tuple=None , lowercase : Union[str, Any]=32 , lowercase : Any=0.0 , lowercase : Optional[int]=False , lowercase : List[Any]=32 , lowercase : Optional[Any]=1 , lowercase : int=None , lowercase : int=None , **lowercase : Dict , ):
'''simple docstring'''
super().__init__(**lowercase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_snake_case = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
_snake_case = num_channels
_snake_case = embedding_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = layer_type
_snake_case = hidden_act
_snake_case = global_padding
_snake_case = num_groups
_snake_case = drop_path_rate
_snake_case = embedding_dynamic_padding
_snake_case = output_stride
_snake_case = width_factor
_snake_case = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase ) + 1 )]
_snake_case , _snake_case = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names ) | 282 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] , lowercase : Dict ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
_snake_case = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(lowercase )
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Any ):
'''simple docstring'''
_snake_case = 'sgugger/tiny-distilbert-classification'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , only_pretrain_model=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , torchscript=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , fpaa=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
# set architectures equal to `None`
_snake_case = None
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tinier_bart'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = 'sshleifer/tinier_bart'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , save_to_csv=lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowercase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowercase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowercase , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowercase , 'env.csv' ) , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'env.csv' ) ).exists() )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowercase : Optional[Any] ):
self.assertTrue(hasattr(lowercase , 'sequential' ) )
self.assertTrue(hasattr(lowercase , 'cumulative' ) )
self.assertTrue(hasattr(lowercase , 'current' ) )
self.assertTrue(hasattr(lowercase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase , 'log.txt' ) , log_print=lowercase , trace_memory_line_by_line=lowercase , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase , 'log.txt' ) ).exists() ) | 282 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : List[str] = logging.get_logger(__name__)
def a_ ( __lowercase : List[Any] , __lowercase : Optional[int]=False ) -> Optional[Any]:
_snake_case = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def a_ ( __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Optional[int]=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
_snake_case = ''
else:
_snake_case = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_snake_case = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[
: config.hidden_size, :
]
_snake_case = in_proj_bias[: config.hidden_size]
_snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case = in_proj_weight[
-config.hidden_size :, :
]
_snake_case = in_proj_bias[-config.hidden_size :]
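# timm packs query, key and value into a single (3 * hidden_size, hidden_size) projection
# matrix; the three equal slices above become the separate HF query/key/value weights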
def a_ ( __lowercase : int , __lowercase : Tuple , __lowercase : List[Any] ) -> Optional[Any]:
_snake_case = dct.pop(__lowercase )
_snake_case = val
def a_ ( ) -> Union[str, Any]:
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a_ ( __lowercase : List[Any] , __lowercase : Dict ) -> List[str]:
_snake_case = DeiTConfig()
# all deit models have fine-tuned heads
_snake_case = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_snake_case = 1_000
_snake_case = 'huggingface/label-files'
_snake_case = 'imagenet-1k-id2label.json'
_snake_case = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(__lowercase ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
_snake_case = int(deit_name[-6:-4] )
_snake_case = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
_snake_case = 192
_snake_case = 768
_snake_case = 12
_snake_case = 3
elif deit_name[9:].startswith('small' ):
_snake_case = 384
_snake_case = 1_536
_snake_case = 12
_snake_case = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
_snake_case = 1_024
_snake_case = 4_096
_snake_case = 24
_snake_case = 16
# load original model from timm
_snake_case = timm.create_model(__lowercase , pretrained=__lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_snake_case = timm_model.state_dict()
_snake_case = create_rename_keys(__lowercase , __lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_q_k_v(__lowercase , __lowercase , __lowercase )
# load HuggingFace model
_snake_case = DeiTForImageClassificationWithTeacher(__lowercase ).eval()
model.load_state_dict(__lowercase )
# Check outputs on an image, prepared by DeiTImageProcessor
_snake_case = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
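# e.g. image_size 224 -> resize to 256 before the center crop; image_size 384 -> int((256 / 224) * 384) = 438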
_snake_case = DeiTImageProcessor(size=__lowercase , crop_size=config.image_size )
_snake_case = image_processor(images=prepare_img() , return_tensors='pt' )
_snake_case = encoding['pixel_values']
_snake_case = model(__lowercase )
_snake_case = timm_model(__lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowercase , outputs.logits , atol=1E-3 )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCamelCase : int = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path) | 282 |
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase : int , lowercase : int , lowercase : float = 0 ):
'''simple docstring'''
_snake_case , _snake_case = row, column
_snake_case = [[default_value for c in range(lowercase )] for r in range(lowercase )]
def __str__( self : int ):
'''simple docstring'''
_snake_case = f'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
_snake_case = 0
for row_vector in self.array:
for obj in row_vector:
_snake_case = max(lowercase , len(str(lowercase ) ) )
_snake_case = f'''%{max_element_length}s'''
# Make string and return
def single_line(lowercase : list[float] ) -> str:
nonlocal string_format_identifier
_snake_case = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowercase ) for row_vector in self.array )
return s
def __repr__( self : Dict ):
'''simple docstring'''
return str(self )
def A ( self : str , lowercase : tuple[int, int] ):
'''simple docstring'''
if not (isinstance(lowercase , (list, tuple) ) and len(lowercase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Dict , lowercase : tuple[int, int] ):
'''simple docstring'''
assert self.validate_indicies(lowercase )
return self.array[loc[0]][loc[1]]
def __setitem__( self : str , lowercase : tuple[int, int] , lowercase : float ):
'''simple docstring'''
assert self.validate_indicies(lowercase )
_snake_case = value
def __add__( self : str , lowercase : Matrix ):
'''simple docstring'''
assert isinstance(lowercase , lowercase )
assert self.row == another.row and self.column == another.column
# Add
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ):
'''simple docstring'''
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = -self[r, c]
return result
def __sub__( self : List[str] , lowercase : Matrix ):
'''simple docstring'''
return self + (-another)
def __mul__( self : Dict , lowercase : int | float | Matrix ):
'''simple docstring'''
if isinstance(lowercase , (int, float) ): # Scalar multiplication
_snake_case = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c] * another
return result
elif isinstance(lowercase , lowercase ): # Matrix multiplication
assert self.column == another.row
_snake_case = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_snake_case = f'''Unsupported type given for another ({type(lowercase )})'''
raise TypeError(lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
_snake_case = self[r, c]
return result
def A ( self : List[Any] , lowercase : Matrix , lowercase : Matrix ):
'''simple docstring'''
assert isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_snake_case = v.transpose()
_snake_case = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
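# Editorial sketch (not part of the original class; assumes numpy is available) of the
# Sherman-Morrison identity implemented above, where `a_inv` plays the role of the
# already-inverted matrix `self`:
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
import numpy as np
def sherman_morrison_inverse(a_inv: np.ndarray, u: np.ndarray, v: np.ndarray) -> np.ndarray:
    # u and v are column vectors of shape (n, 1); the caller must ensure the denominator is nonzero
    denominator = 1.0 + float(v.T @ a_inv @ u)
    return a_inv - (a_inv @ u) @ (v.T @ a_inv) / denominator
# Cross-check against a direct inverse:
# A = np.eye(3); u = np.array([[1.0], [2.0], [-3.0]]); v = np.array([[4.0], [-2.0], [5.0]])
# assert np.allclose(sherman_morrison_inverse(np.linalg.inv(A), u, v), np.linalg.inv(A + u @ v.T))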
# Testing
if __name__ == "__main__":
def a_ ( ) -> None:
# a^(-1)
_snake_case = Matrix(3 , 3 , 0 )
for i in range(3 ):
_snake_case = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
_snake_case = Matrix(3 , 1 , 0 )
_snake_case , _snake_case , _snake_case = 1, 2, -3
_snake_case = Matrix(3 , 1 , 0 )
_snake_case , _snake_case , _snake_case = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(__lowercase , __lowercase )}''' )
def a_ ( ) -> None:
import doctest
doctest.testmod()
testa() | 282 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a_ ( __lowercase : Dict , __lowercase : str=0.9_9_9 , __lowercase : Union[str, Any]="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(__lowercase : Union[str, Any] ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__lowercase : Any ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_snake_case = []
for i in range(__lowercase ):
_snake_case = i / num_diffusion_timesteps
_snake_case = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__lowercase ) / alpha_bar_fn(__lowercase ) , __lowercase ) )
return torch.tensor(__lowercase , dtype=torch.floataa )
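# Quick editorial check (uses the `math` import above; not part of the original module):
# for the "cosine" branch, beta_i = min(1 - alpha_bar((i + 1) / N) / alpha_bar(i / N), max_beta)
# with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, so the betas increase toward
# max_beta as t -> 1, where alpha_bar vanishes. For N = 4 and max_beta = 0.999:
def _alpha_bar_check(t: float) -> float:
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
_betas_check = [min(1 - _alpha_bar_check((i + 1) / 4) / _alpha_bar_check(i / 4), 0.999) for i in range(4)]
# _betas_check is an increasing sequence whose last entry is clipped to 0.999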
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = [e.name for e in KarrasDiffusionSchedulers]
_UpperCAmelCase : List[str] = 2
@register_to_config
def __init__( self : Tuple , lowercase : int = 1_000 , lowercase : float = 0.00085 , lowercase : float = 0.012 , lowercase : str = "linear" , lowercase : Optional[Union[np.ndarray, List[float]]] = None , lowercase : str = "epsilon" , lowercase : Optional[bool] = False , lowercase : Optional[bool] = False , lowercase : float = 1.0 , lowercase : str = "linspace" , lowercase : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
_snake_case = torch.tensor(lowercase , dtype=torch.floataa )
elif beta_schedule == "linear":
_snake_case = torch.linspace(lowercase , lowercase , lowercase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_snake_case = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowercase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_snake_case = betas_for_alpha_bar(lowercase , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_snake_case = betas_for_alpha_bar(lowercase , alpha_transform_type='exp' )
else:
raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
_snake_case = 1.0 - self.betas
_snake_case = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowercase , lowercase , lowercase )
_snake_case = use_karras_sigmas
def A ( self : Any , lowercase : Optional[Any] , lowercase : Any=None ):
'''simple docstring'''
if schedule_timesteps is None:
_snake_case = self.timesteps
_snake_case = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_snake_case = 1 if len(lowercase ) > 1 else 0
else:
_snake_case = timestep.cpu().item() if torch.is_tensor(lowercase ) else timestep
_snake_case = self._index_counter[timestep_int]
return indices[pos].item()
@property
def A ( self : Any ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def A ( self : List[str] , lowercase : torch.FloatTensor , lowercase : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
_snake_case = self.index_for_timestep(lowercase )
_snake_case = self.sigmas[step_index]
_snake_case = sample / ((sigma**2 + 1) ** 0.5)
return sample
def A ( self : Any , lowercase : int , lowercase : Union[str, torch.device] = None , lowercase : Optional[int] = None , ):
'''simple docstring'''
_snake_case = num_inference_steps
_snake_case = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_snake_case = np.linspace(0 , num_train_timesteps - 1 , lowercase , dtype=lowercase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_snake_case = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_steps is a power of 3
_snake_case = (np.arange(0 , lowercase ) * step_ratio).round()[::-1].copy().astype(lowercase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_snake_case = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_steps is a power of 3
_snake_case = (np.arange(lowercase , 0 , -step_ratio )).round().copy().astype(lowercase )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
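# e.g. with num_train_timesteps=1000 and num_inference_steps=10:
#   "linspace" -> [999., 888., ..., 111., 0.]
#   "leading"  -> [900, 800, ..., 100, 0] (plus steps_offset)
#   "trailing" -> [999, 899, ..., 199, 99] (after the final -1 shift)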
_snake_case = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_snake_case = np.log(lowercase )
_snake_case = np.interp(lowercase , np.arange(0 , len(lowercase ) ) , lowercase )
if self.config.use_karras_sigmas:
_snake_case = self._convert_to_karras(in_sigmas=lowercase , num_inference_steps=self.num_inference_steps )
_snake_case = np.array([self._sigma_to_t(lowercase , lowercase ) for sigma in sigmas] )
_snake_case = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_snake_case = torch.from_numpy(lowercase ).to(device=lowercase )
_snake_case = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_snake_case = torch.from_numpy(lowercase )
_snake_case = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(lowercase ).startswith('mps' ):
# mps does not support float64
_snake_case = timesteps.to(lowercase , dtype=torch.floataa )
else:
_snake_case = timesteps.to(device=lowercase )
# empty dt and derivative
_snake_case = None
_snake_case = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_snake_case = defaultdict(lowercase )
def A ( self : List[str] , lowercase : str , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = np.log(lowercase )
# get distribution
_snake_case = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_snake_case = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_snake_case = low_idx + 1
_snake_case = log_sigmas[low_idx]
_snake_case = log_sigmas[high_idx]
# interpolate sigmas
_snake_case = (low - log_sigma) / (low - high)
_snake_case = np.clip(lowercase , 0 , 1 )
# transform interpolation to time range
_snake_case = (1 - w) * low_idx + w * high_idx
_snake_case = t.reshape(sigma.shape )
return t
def A ( self : Optional[int] , lowercase : torch.FloatTensor , lowercase : str ):
'''simple docstring'''
_snake_case = in_sigmas[-1].item()
_snake_case = in_sigmas[0].item()
_snake_case = 7.0 # 7.0 is the value used in the paper
_snake_case = np.linspace(0 , 1 , lowercase )
_snake_case = sigma_min ** (1 / rho)
_snake_case = sigma_max ** (1 / rho)
_snake_case = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
return self.dt is None
def A ( self : List[Any] , lowercase : Union[torch.FloatTensor, np.ndarray] , lowercase : Union[float, torch.FloatTensor] , lowercase : Union[torch.FloatTensor, np.ndarray] , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = self.index_for_timestep(lowercase )
# advance index counter by 1
_snake_case = timestep.cpu().item() if torch.is_tensor(lowercase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_snake_case = self.sigmas[step_index]
_snake_case = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_snake_case = self.sigmas[step_index - 1]
_snake_case = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We could support gamma in the future, but we would then need to scale the timestep
# before passing it to the model, which requires a change in the API
_snake_case = 0
_snake_case = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_snake_case = sigma_hat if self.state_in_first_order else sigma_next
_snake_case = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_snake_case = sigma_hat if self.state_in_first_order else sigma_next
_snake_case = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_snake_case = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`''' )
if self.config.clip_sample:
_snake_case = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_snake_case = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_snake_case = sigma_next - sigma_hat
# store for 2nd order step
_snake_case = derivative
_snake_case = dt
_snake_case = sample
else:
# 2. 2nd order / Heun's method
_snake_case = (sample - pred_original_sample) / sigma_next
_snake_case = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_snake_case = self.dt
_snake_case = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase )
def A ( self : Dict , lowercase : torch.FloatTensor , lowercase : torch.FloatTensor , lowercase : torch.FloatTensor , ):
'''simple docstring'''
_snake_case = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowercase ):
# mps does not support float64
_snake_case = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_snake_case = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_snake_case = self.timesteps.to(original_samples.device )
_snake_case = timesteps.to(original_samples.device )
_snake_case = [self.index_for_timestep(lowercase , lowercase ) for t in timesteps]
_snake_case = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_snake_case = sigma.unsqueeze(-1 )
_snake_case = original_samples + noise * sigma
return noisy_samples
def __len__( self : Dict ):
'''simple docstring'''
return self.config.num_train_timesteps | 282 |
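# Editorial sketch (standalone; assumes numpy) of the Karras et al. (2022) sigma schedule
# implemented by `_convert_to_karras` above:
# sigma_i = (sigma_max^(1/rho) + i / (n - 1) * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho
import numpy as np
def karras_sigmas(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
# karras_sigmas(0.1, 10.0, 5) decreases from 10.0 to 0.1, spending more of the schedule
# at small sigmas than a plain geometric interpolation would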
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , *lowercase : Optional[int] , **lowercase : Any ):
'''simple docstring'''
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase ) | 282 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Union[str, Any] = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 282 |
def a_ ( __lowercase : str ) -> int:
_snake_case = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
_snake_case = hex_num[0] == '-'
if is_negative:
_snake_case = hex_num[1:]
try:
_snake_case = int(__lowercase , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
_snake_case = '0' if int_num == 0 else '' # ensure hex "0" maps to 0 instead of int('') raising
while int_num > 0:
_snake_case = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
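# Editorial usage note for the converter above (named `a_` in this file): the return value
# is an int whose decimal digits spell out the binary representation, e.g.
# a_("AC") == 10101100 (0xAC == 172 == 0b10101100) and a_("-15") == -10101 (0x15 == 21 == 0b10101).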
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : str = "timesformer"
def __init__( self : int , lowercase : Dict=224 , lowercase : List[str]=16 , lowercase : str=3 , lowercase : List[Any]=8 , lowercase : int=768 , lowercase : Dict=12 , lowercase : Optional[int]=12 , lowercase : Union[str, Any]=3_072 , lowercase : List[Any]="gelu" , lowercase : Dict=0.0 , lowercase : Tuple=0.0 , lowercase : Union[str, Any]=0.02 , lowercase : str=1E-6 , lowercase : Union[str, Any]=True , lowercase : Any="divided_space_time" , lowercase : Union[str, Any]=0 , **lowercase : str , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = num_frames
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = qkv_bias
_snake_case = attention_type
_snake_case = drop_path_rate | 282 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "longformer"
def __init__( self : Optional[Any] , lowercase : Union[List[int], int] = 512 , lowercase : int = 2 , lowercase : int = 1 , lowercase : int = 0 , lowercase : int = 2 , lowercase : int = 30_522 , lowercase : int = 768 , lowercase : int = 12 , lowercase : int = 12 , lowercase : int = 3_072 , lowercase : str = "gelu" , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : int = 512 , lowercase : int = 2 , lowercase : float = 0.02 , lowercase : float = 1E-12 , lowercase : bool = False , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase , **lowercase )
_snake_case = attention_window
_snake_case = sep_token_id
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = onnx_export
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : int , lowercase : "PretrainedConfig" , lowercase : str = "default" , lowercase : "List[PatchingSpec]" = None ):
'''simple docstring'''
super().__init__(lowercase , lowercase , lowercase )
_snake_case = True
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def A ( self : int ):
'''simple docstring'''
_snake_case = super().outputs
if self.task == "default":
_snake_case = {0: 'batch'}
return outputs
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[str] ):
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def A ( self : str , lowercase : "PreTrainedTokenizerBase" , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
_snake_case = super().generate_dummy_inputs(
preprocessor=lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_snake_case = torch.zeros_like(inputs['input_ids'] )
# make every second token global
_snake_case = 1
return inputs | 282 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = inspect.getfile(accelerate.test_utils )
_snake_case = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_snake_case = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_snake_case = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def A ( self : Optional[Any] ):
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
_snake_case = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase , env=os.environ.copy() )
@require_multi_gpu
def A ( self : Dict ):
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
_snake_case = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase , env=os.environ.copy() )
@require_multi_gpu
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase , env=os.environ.copy() )
@require_multi_gpu
def A ( self : Optional[int] ):
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
_snake_case = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
execute_subprocess_async(lowercase , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCamelCase : Any = Accelerator()
_lowerCamelCase : Optional[int] = (accelerator.state.process_index + 2, 10)
_lowerCamelCase : Tuple = torch.randint(0, 10, shape).to(accelerator.device)
_lowerCamelCase : Optional[int] = ''''''
_lowerCamelCase : List[str] = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCamelCase : int = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCamelCase : int = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 282 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , lowercase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(lowercase )
def A ( self : Optional[int] , lowercase : torch.FloatTensor , lowercase : Union[torch.Tensor, float, int] , lowercase : torch.Tensor , lowercase : List[torch.tensor] , lowercase : List[float] , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[Dict[str, Any]] = None , lowercase : bool = False , lowercase : bool = True , ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowercase , lowercase , self.nets ) ):
_snake_case , _snake_case = controlnet(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# merge samples
if i == 0:
_snake_case , _snake_case = down_samples, mid_sample
else:
_snake_case = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase , lowercase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def A ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = True , lowercase : Callable = None , lowercase : bool = False , lowercase : Optional[str] = None , ):
'''simple docstring'''
_snake_case = 0
_snake_case = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase , is_main_process=lowercase , save_function=lowercase , safe_serialization=lowercase , variant=lowercase , )
idx += 1
_snake_case = model_path_to_save + f'''_{idx}'''
@classmethod
def A ( cls : Any , lowercase : Optional[Union[str, os.PathLike]] , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = 0
_snake_case = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case = pretrained_model_path
while os.path.isdir(lowercase ):
_snake_case = ControlNetModel.from_pretrained(lowercase , **lowercase )
controlnets.append(lowercase )
idx += 1
_snake_case = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(lowercase )} controlnets loaded from {pretrained_model_path}.''' )
if len(lowercase ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(lowercase )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(lowercase ) | 282 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_lowerCamelCase : int = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
_lowerCamelCase : List[str] = '''sshleifer/student_marian_en_ro_6_1'''
_lowerCamelCase : Optional[Any] = '''sshleifer/tiny-mbart'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : str , lowercase : Optional[int]=False , lowercase : Union[str, Any]=None , lowercase : Optional[int]=True , lowercase : List[str]=True , lowercase : Tuple=True , lowercase : List[str]=True , ):
'''simple docstring'''
_snake_case = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=lowercase , num_train_epochs=1 , distributed=lowercase , extra_args_str=lowercase , predict_with_generate=lowercase , do_train=lowercase , do_eval=lowercase , do_predict=lowercase , )
_snake_case = TrainerState.load_from_json(os.path.join(lowercase , 'trainer_state.json' ) ).log_history
if not do_eval:
return
_snake_case = [log for log in logs if 'eval_loss' in log.keys()]
_snake_case = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_snake_case = eval_metrics[-1]
assert isinstance(last_step_stats['eval_bleu'] , lowercase )
assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A ( self : Any ):
'''simple docstring'''
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A ( self : List[str] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase )
@require_torch_multi_gpu
def A ( self : Any ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def A ( self : Dict ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase , extra_args_str='--sharded_ddp simple' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def A ( self : Optional[int] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase , extra_args_str='--sharded_ddp simple --fp16' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def A ( self : List[str] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase , extra_args_str='--sharded_ddp zero_dp_2' , predict_with_generate=lowercase )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def A ( self : int ):
'''simple docstring'''
self.run_seqaseq_quick(
distributed=lowercase , extra_args_str='--sharded_ddp zero_dp_2 --fp16' , predict_with_generate=lowercase )
@require_apex
@require_torch_gpu
def A ( self : Any ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowercase , extra_args_str='--fp16 --fp16_backend=apex' )
# test a 2nd time - was getting eval_loss: nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=lowercase , extra_args_str='--fp16 --fp16_backend=apex' )
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
def A ( self : Tuple , lowercase : Any ):
'''simple docstring'''
_snake_case = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
_snake_case = experiments[experiment_id]
_snake_case = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
_snake_case = 'Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**lowercase , extra_args_str=data['extra_args_str'] )
_snake_case = len(re.findall(lowercase , cl.err ) )
self.assertEqual(lowercase , data['n_matches'] )
@slow
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=lowercase , )
# Check metrics
_snake_case = TrainerState.load_from_json(os.path.join(lowercase , 'trainer_state.json' ) ).log_history
_snake_case = [log for log in logs if 'eval_loss' in log.keys()]
_snake_case = eval_metrics[0]
_snake_case = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['eval_bleu'] , lowercase )
# test if do_predict saves generations and metrics
_snake_case = os.listdir(lowercase )
_snake_case = {os.path.basename(lowercase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A ( self : Tuple ):
'''simple docstring'''
from transformers.training_args import OptimizerNames
def train_and_return_metrics(lowercase : str ) -> Tuple[int, float]:
_snake_case = '--skip_memory_metrics 0'
_snake_case = self.run_trainer(
max_len=128 , model_name=lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=lowercase , distributed=lowercase , extra_args_str=lowercase , do_eval=lowercase , do_predict=lowercase , n_gpus_to_use=1 , )
# Check metrics
_snake_case = TrainerState.load_from_json(Path(lowercase , 'trainer_state.json' ) ).log_history
_snake_case = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
_snake_case = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
_snake_case = logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_snake_case , _snake_case , _snake_case = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_snake_case , _snake_case , _snake_case = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_snake_case = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_snake_case = gpu_peak_mem_orig + gpu_alloc_mem_orig
_snake_case = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_snake_case = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which belong to `nn.Embedding`,
# which doesn't get quantized and remains in fp32. Therefore only about 25M parameters are
# quantized to 2 bytes each, and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings
_snake_case = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
lowercase , lowercase , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
lowercase , lowercase , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
lowercase , lowercase , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def A ( self : Optional[Any] , lowercase : int , lowercase : str , lowercase : int , lowercase : float = 3E-3 , lowercase : str = "adafactor" , lowercase : bool = False , lowercase : str = None , lowercase : int = 0 , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , lowercase : int = None , ):
'''simple docstring'''
_snake_case = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
_snake_case = self.get_auto_remove_tmp_dir()
_snake_case = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(lowercase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
_snake_case = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(lowercase )}
'''.split()
_snake_case = '\n --do_predict\n '.split()
_snake_case = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_snake_case = get_gpu_count()
_snake_case = get_torch_dist_unique_port()
_snake_case = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
_snake_case = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
else:
_snake_case = ['run_translation.py'] + args
with patch.object(lowercase , 'argv' , lowercase ):
main()
return output_dir | 282 |
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : List[str] , lowercase : list[int] ):
'''simple docstring'''
_snake_case = len(lowercase )
_snake_case = [0] * len_array
if len_array > 0:
_snake_case = array[0]
for i in range(1 , lowercase ):
_snake_case = self.prefix_sum[i - 1] + array[i]
def A ( self : Optional[Any] , lowercase : int , lowercase : int ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def A ( self : Union[str, Any] , lowercase : int ):
'''simple docstring'''
_snake_case = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowercase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
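# Editorial re-statement of the technique above (readable names are mine, not from the class):
# with prefix[i] = a[0] + ... + a[i], the range sum a[l..r] is prefix[r] - prefix[l - 1],
# and a contiguous subarray summing to `target` exists iff some running prefix minus an
# earlier prefix (or 0) equals target.
def has_subarray_with_sum(array: list[int], target: int) -> bool:
    seen = {0}
    running = 0
    for value in array:
        running += value
        if running - target in seen:
            return True
        seen.add(running)
    return False
# has_subarray_with_sum([1, 2, 3, 7, 5], 12) -> True (the subarray [2, 3, 7])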
import numpy as np
from transformers import Pipeline
def a_ ( __lowercase : Dict ) -> Tuple:
_snake_case = np.max(__lowercase , axis=-1 , keepdims=__lowercase )
_snake_case = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowercase )
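# Editorial note: subtracting the row max above is the standard overflow guard for softmax.
# exp(outputs - maxes) never exceeds 1, and the shared factor exp(-max) cancels in the ratio,
# so the probabilities are unchanged; a naive np.exp(x) / np.exp(x).sum() returns nan for
# x = [1000.0, 1000.0], while the shifted version returns [0.5, 0.5].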
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : Union[str, Any] , **lowercase : int ):
'''simple docstring'''
_snake_case = {}
if "second_text" in kwargs:
_snake_case = kwargs['second_text']
return preprocess_kwargs, {}, {}
def A ( self : Any , lowercase : Optional[Any] , lowercase : Tuple=None ):
'''simple docstring'''
return self.tokenizer(lowercase , text_pair=lowercase , return_tensors=self.framework )
def A ( self : Optional[int] , lowercase : Optional[int] ):
'''simple docstring'''
return self.model(**lowercase )
def A ( self : Dict , lowercase : Tuple ):
'''simple docstring'''
_snake_case = model_outputs.logits[0].numpy()
_snake_case = softmax(lowercase )
_snake_case = np.argmax(lowercase )
_snake_case = self.model.config.idalabel[best_class]
_snake_case = probabilities[best_class].item()
_snake_case = logits.tolist()
return {"label": label, "score": score, "logits": logits} | 282 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int = 16 , lowercase : int = 88 , lowercase : Optional[int] = None , lowercase : int = 1 , lowercase : float = 0.0 , lowercase : int = 32 , lowercase : Optional[int] = None , lowercase : bool = False , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : str = "geglu" , lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase , attention_head_dim=lowercase , in_channels=lowercase , num_layers=lowercase , dropout=lowercase , norm_num_groups=lowercase , cross_attention_dim=lowercase , attention_bias=lowercase , sample_size=lowercase , num_vector_embeds=lowercase , activation_fn=lowercase , num_embeds_ada_norm=lowercase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_snake_case = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_snake_case = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_snake_case = [1, 0]
def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : List[str]=None , lowercase : Tuple=None , lowercase : Dict=None , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = hidden_states
_snake_case = []
_snake_case = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_snake_case = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_snake_case = self.transformer_index_for_condition[i]
_snake_case = self.transformers[transformer_index](
lowercase , encoder_hidden_states=lowercase , timestep=lowercase , cross_attention_kwargs=lowercase , return_dict=lowercase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_snake_case = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_snake_case = output_states + input_states
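# equivalently: out = r * (t_a(x) - x) + (1 - r) * (t_b(x) - x) + x with r = mix_ratio,
# i.e. a residual blend of the two transformers' contributions on top of the input states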
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase ) | 282 | 1 |
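# A small, self-contained sketch (separate from the module above) of the
# inference-time blend it performs: with mix_ratio = 0.5 the two transformers'
# residual outputs are averaged before the original input is added back.
import torch

enc_state_a, enc_state_b, inputs = torch.ones(2), torch.full((2,), 3.0), torch.zeros(2)
mix_ratio = 0.5
output = (enc_state_a * mix_ratio + enc_state_b * (1 - mix_ratio)) + inputs
assert torch.allclose(output, torch.full((2,), 2.0))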
import datasets
from .evaluate import evaluate
_lowerCamelCase : List[Any] = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_lowerCamelCase : Union[str, Any] = '''
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
_lowerCamelCase : List[str] = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly matches the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )
def A ( self : Dict , lowercase : Tuple , lowercase : str ):
'''simple docstring'''
_snake_case = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
_snake_case = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
_snake_case = evaluate(dataset=lowercase , predictions=lowercase )
return score | 282 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoTokenizer.from_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = tokenizer('This is me' , return_tensors='pt' )
_snake_case = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_snake_case = model.generate(**lowercase )
_snake_case = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_snake_case = model_reloaded.generate(**lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase ) )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowercase ):
model.save_pretrained(lowercase )
_snake_case = model.reverse_bettertransformer()
model.save_pretrained(lowercase ) | 282 | 1 |
from __future__ import annotations
def a_ ( __lowercase : list[int] , __lowercase : list[int] , __lowercase : list[int] , __lowercase : list[list[str]] , __lowercase : int , ) -> None:
_snake_case = len(__lowercase )
# If row equals the size of the board, there is a queen in every row of
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(__lowercase ):
# Apply what we learned previously. First check that the column value does
# not already appear in the current board (possible_board); a repeated
# column means a vertical collision. Then apply the two diagonal formulas
# from before:
#
# 45º: y - x = b, i.e. row - col = b
# 135º: y + x = b, i.e. row + col = b
#
# and verify that their results do not already exist in the corresponding
# collision sets (diagonal_right_collisions, diagonal_left_collisions).
#
# If any of these checks is True there is a collision, so we continue to
# the next column in the for loop. (A small self-contained check of these
# diagonal keys follows this snippet.)
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# Otherwise there is no collision: recurse (DFS) with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , __lowercase , __lowercase , )
def a_ ( __lowercase : int ) -> None:
_snake_case = []
depth_first_search([] , [] , [] , __lowercase , __lowercase )
# Print all the boards
for board in boards:
for column in board:
print(__lowercase )
print('' )
print(len(__lowercase ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4) | 282 |
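# The self-contained check promised in the comments above: two queens at
# (r1, c1) and (r2, c2) share a diagonal exactly when r1 - c1 == r2 - c2
# (a "\" diagonal) or r1 + c1 == r2 + c2 (a "/" diagonal), which is why the
# DFS only needs to track those two difference/sum keys.
def same_diagonal(r1: int, c1: int, r2: int, c2: int) -> bool:
    return r1 - c1 == r2 - c2 or r1 + c1 == r2 + c2

assert same_diagonal(0, 0, 2, 2)      # both on a "\" diagonal
assert same_diagonal(0, 3, 3, 0)      # both on a "/" diagonal
assert not same_diagonal(0, 1, 2, 0)  # no shared diagonal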
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCamelCase : List[Any] = HfApi()
_lowerCamelCase : Dict = {}
# fmt: off
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
_lowerCamelCase : List[str] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCamelCase : Any = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
_lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
_lowerCamelCase : int = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCamelCase : Union[str, Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCamelCase : int = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCamelCase : int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'{mod.modelId} has passed successfully!!!') | 282 | 1 |
def a_ ( __lowercase : int , __lowercase : int , __lowercase : list[list[int]] ) -> int:
def update_area_of_max_square(__lowercase : int , __lowercase : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
_snake_case = update_area_of_max_square(__lowercase , col + 1 )
_snake_case = update_area_of_max_square(row + 1 , col + 1 )
_snake_case = update_area_of_max_square(row + 1 , __lowercase )
if mat[row][col]:
_snake_case = 1 + min([right, diagonal, down] )
_snake_case = max(largest_square_area[0] , __lowercase )
return sub_problem_sol
else:
return 0
_snake_case = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def a_ ( __lowercase : int , __lowercase : int , __lowercase : list[list[int]] ) -> int:
def update_area_of_max_square_using_dp_array(
__lowercase : int , __lowercase : int , __lowercase : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
_snake_case = update_area_of_max_square_using_dp_array(__lowercase , col + 1 , __lowercase )
_snake_case = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , __lowercase )
_snake_case = update_area_of_max_square_using_dp_array(row + 1 , __lowercase , __lowercase )
if mat[row][col]:
_snake_case = 1 + min([right, diagonal, down] )
_snake_case = max(largest_square_area[0] , __lowercase )
_snake_case = sub_problem_sol
return sub_problem_sol
else:
return 0
_snake_case = [0]
_snake_case = [[-1] * cols for _ in range(__lowercase )]
update_area_of_max_square_using_dp_array(0 , 0 , __lowercase )
return largest_square_area[0]
def a_ ( __lowercase : int , __lowercase : int , __lowercase : list[list[int]] ) -> int:
_snake_case = [[0] * (cols + 1) for _ in range(rows + 1 )]
_snake_case = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
_snake_case = dp_array[row][col + 1]
_snake_case = dp_array[row + 1][col + 1]
_snake_case = dp_array[row + 1][col]
if mat[row][col] == 1:
_snake_case = 1 + min(__lowercase , __lowercase , __lowercase )
_snake_case = max(dp_array[row][col] , __lowercase )
else:
_snake_case = 0
return largest_square_area
def a_ ( __lowercase : int , __lowercase : int , __lowercase : list[list[int]] ) -> int:
_snake_case = [0] * (cols + 1)
_snake_case = [0] * (cols + 1)
_snake_case = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
_snake_case = current_row[col + 1]
_snake_case = next_row[col + 1]
_snake_case = next_row[col]
if mat[row][col] == 1:
_snake_case = 1 + min(__lowercase , __lowercase , __lowercase )
_snake_case = max(current_row[col] , __lowercase )
else:
_snake_case = 0
_snake_case = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])) | 282 |
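# A hand-traced sketch (separate from the row above) of the bottom-up
# recurrence dp[r][c] = 1 + min(right, diagonal, down) on a tiny matrix
# containing a 2x2 block of ones:
rows, cols = 2, 3
mat = [[1, 1, 0], [1, 1, 1]]
dp = [[0] * (cols + 1) for _ in range(rows + 1)]
best = 0
for r in range(rows - 1, -1, -1):
    for c in range(cols - 1, -1, -1):
        if mat[r][c]:
            dp[r][c] = 1 + min(dp[r][c + 1], dp[r + 1][c + 1], dp[r + 1][c])
            best = max(best, dp[r][c])
assert best == 2  # the 2x2 block of ones yields a square of side 2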
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase , 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowercase , 'depth_multiplier' ) )
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : List[str] , lowercase : Dict=13 , lowercase : Optional[int]=3 , lowercase : Any=32 , lowercase : Any=0.25 , lowercase : Union[str, Any]=8 , lowercase : List[Any]=8 , lowercase : List[Any]=6 , lowercase : Dict=32 , lowercase : Dict=True , lowercase : Optional[Any]=True , lowercase : Tuple=True , lowercase : Tuple="relu6" , lowercase : List[Any]=1_280 , lowercase : Optional[Any]=0.1 , lowercase : int=0.02 , lowercase : Optional[Any]=True , lowercase : List[str]=True , lowercase : List[str]=10 , lowercase : Optional[Any]=None , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = depth_multiplier
_snake_case = depth_divisible_by
_snake_case = min_depth
_snake_case = expand_ratio
_snake_case = tf_padding
_snake_case = output_stride
_snake_case = first_layer_is_expansion
_snake_case = finegrained_output
_snake_case = hidden_act
_snake_case = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
_snake_case = classifier_dropout_prob
_snake_case = use_labels
_snake_case = is_training
_snake_case = num_labels
_snake_case = initializer_range
_snake_case = scope
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def A ( self : str ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A ( self : Optional[Any] , lowercase : str , lowercase : List[str] , lowercase : str , lowercase : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def A ( self : List[Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForImageClassification(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , lowercase : int , lowercase : Dict , lowercase : int , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForSemanticSegmentation(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A ( self : str ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : str = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase : str = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Union[str, Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = MobileNetVaModelTester(self )
_snake_case = MobileNetVaConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def A ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def A ( self : Any ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : str ):
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.hidden_states
_snake_case = 16
self.assertEqual(len(lowercase ) , lowercase )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase )
@slow
def A ( self : List[Any] ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = MobileNetVaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def a_ ( ) -> Union[str, Any]:
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A ( self : Optional[Any] ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(lowercase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
# verify the logits
_snake_case = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowercase )
_snake_case = torch.tensor([0.2445, -1.1993, 0.1905] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
@slow
def A ( self : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = model.to(lowercase )
_snake_case = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
_snake_case = outputs.logits
# verify the logits
_snake_case = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , lowercase )
_snake_case = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase , atol=1E-4 ) ) | 282 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = VideoToVideoSDPipeline
_UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
_UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
_UpperCAmelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"latents"}
_UpperCAmelCase : str = False
# No `output_type`.
_UpperCAmelCase : List[str] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def A ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
_snake_case = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
_snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
_snake_case = CLIPTextModel(lowercase )
_snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_snake_case = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def A ( self : str , lowercase : str , lowercase : List[str]=0 ):
'''simple docstring'''
_snake_case = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
if str(lowercase ).startswith('mps' ):
_snake_case = torch.manual_seed(lowercase )
else:
_snake_case = torch.Generator(device=lowercase ).manual_seed(lowercase )
_snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def A ( self : Any ):
'''simple docstring'''
_snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = VideoToVideoSDPipeline(**lowercase )
_snake_case = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
_snake_case = self.get_dummy_inputs(lowercase )
_snake_case = 'np'
_snake_case = sd_pipe(**lowercase ).frames
_snake_case = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
_snake_case = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase , expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def A ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def A ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def A ( self : Tuple ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
_snake_case = torch.Generator(device='cpu' ).manual_seed(0 )
_snake_case = torch.randn((1, 10, 3, 1_024, 576) , generator=lowercase )
_snake_case = video.to('cuda' )
_snake_case = 'Spiderman is surfing'
_snake_case = pipe(lowercase , video=lowercase , generator=lowercase , num_inference_steps=3 , output_type='pt' ).frames
_snake_case = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2 | 282 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( __lowercase : Dict , __lowercase : int , __lowercase : Optional[Any]=None ) -> Any:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
_snake_case = nn.Parameter(__lowercase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
_snake_case = nn.Parameter(__lowercase )
def a_ ( __lowercase : Any , __lowercase : Dict , __lowercase : Union[str, Any] ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : Any ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
_snake_case = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : Dict , __lowercase : List[str] , __lowercase : Union[str, Any] ) -> Optional[Any]:
# layernorm 1
_snake_case = weights[0][0][0]
_snake_case = np.asarray(layer_norm_a[0] )
_snake_case = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# lsh weights + output
_snake_case = weights[0][1]
if len(__lowercase ) < 4:
set_layer_weights_in_torch_lsh(__lowercase , torch_block.attention , __lowercase )
else:
set_layer_weights_in_torch_local(__lowercase , torch_block.attention , __lowercase )
# intermediate weighs
_snake_case = weights[2][0][1][2]
# Chunked Feed Forward
if len(__lowercase ) == 4:
_snake_case = intermediate_weights[2]
# layernorm 2
_snake_case = np.asarray(intermediate_weights[0][0] )
_snake_case = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# intermediate dense
_snake_case = np.asarray(intermediate_weights[1][0] )
_snake_case = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
# intermediate out
_snake_case = np.asarray(intermediate_weights[4][0] )
_snake_case = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Tuple , __lowercase : Tuple , __lowercase : Dict ) -> Optional[int]:
# reformer model
_snake_case = torch_model.reformer
# word embeds
_snake_case = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowercase ) , )
if isinstance(weights[3] , __lowercase ):
_snake_case = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_snake_case = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
_snake_case = nn.Parameter(torch.tensor(__lowercase ) )
_snake_case = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__lowercase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_snake_case = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__lowercase , __lowercase , __lowercase )
# output layer norm
_snake_case = np.asarray(weights[7][0] )
_snake_case = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# output embeddings
_snake_case = np.asarray(weights[9][0] )
_snake_case = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[Any] ) -> Optional[int]:
# Initialise PyTorch model
_snake_case = ReformerConfig.from_json_file(__lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
_snake_case = ReformerModelWithLMHead(__lowercase )
with open(__lowercase , 'rb' ) as f:
_snake_case = pickle.load(__lowercase )['weights']
set_model_weights_in_torch(__lowercase , __lowercase , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 282 | 1 |
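# A shape-only sketch (separate from the script above, with a hypothetical
# trax weight layout) of the transpose/view pattern it uses: per-head
# weights are flattened into the 2-D matrix that nn.Linear expects.
import torch

w = torch.randn(2, 8, 4)  # hypothetical (heads, model_dim, head_dim) layout
flat = w.transpose(1, 2).contiguous().view(-1, 8)  # -> (heads * head_dim, model_dim)
assert flat.shape == (8, 8)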
def a_ ( __lowercase : int ) -> int:
if not isinstance(__lowercase , __lowercase ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
_snake_case = 0
while number:
# `number &= number - 1` clears the lowest set bit, so the loop runs once
# per set bit (the popcount) instead of once per bit position (up to 32)
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 |
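# Tracing the snippet above on 13 (0b1101): each `n &= n - 1` clears the
# lowest set bit, 0b1101 -> 0b1100 -> 0b1000 -> 0b0000, so the loop runs
# exactly three times.
n, count = 0b1101, 0
while n:
    n &= n - 1
    count += 1
assert count == 3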
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a_ ( __lowercase : Dict ) -> List[Any]:
_snake_case = args.pruning_method
_snake_case = args.threshold
_snake_case = args.model_name_or_path.rstrip('/' )
_snake_case = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_snake_case = torch.load(os.path.join(__lowercase , 'pytorch_model.bin' ) )
_snake_case = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_snake_case = MagnitudeBinarizer.apply(inputs=__lowercase , threshold=__lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = TopKBinarizer.apply(__lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = ThresholdBinarizer.apply(__lowercase , __lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case , _snake_case = -0.1, 1.1
_snake_case = torch.sigmoid(__lowercase )
_snake_case = s * (r - l) + l
_snake_case = s_bar.clamp(min=0.0 , max=1.0 )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_snake_case = os.path.join(
os.path.dirname(__lowercase ) , f'''bertarized_{os.path.basename(__lowercase )}''' )
if not os.path.isdir(__lowercase ):
shutil.copytree(__lowercase , __lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(__lowercase , os.path.join(__lowercase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder where the pruned model will be saved (defaults to a bertarized_* folder next to the input model)''',
)
_lowerCamelCase : int = parser.parse_args()
main(args) | 282 | 1 |
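# A hedged usage sketch for the script above (the file name bertarize.py is
# an assumption; the flags come from the argparse block). This keeps 10% of
# the weights with movement (topK) pruning; without --target_model_path the
# result is written to a `bertarized_*` folder next to the input model:
#
#   python bertarize.py \
#       --pruning_method topK \
#       --threshold 0.10 \
#       --model_name_or_path /path/to/fine-pruned-model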
def a_ ( __lowercase : str ) -> str:
return "".join(chr(ord(__lowercase ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod() | 282 |
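# Why the snippet above subtracts 32: ASCII places 'A'..'Z' exactly 32 code
# points below 'a'..'z' (str.upper() is the idiomatic library equivalent).
assert ord('a') - ord('A') == 32
assert ''.join(chr(ord(c) - 32) if 'a' <= c <= 'z' else c for c in 'abc XYZ') == 'ABC XYZ'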
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@property
def A ( self : List[str] ):
'''simple docstring'''
return self.get_dummy_input()
@property
def A ( self : Any ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def A ( self : Union[str, Any] , lowercase : Any=True , lowercase : List[Any]=False , lowercase : List[str]=False , lowercase : Dict=False , ):
'''simple docstring'''
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowercase )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowercase , generator=lowercase , device=lowercase )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowercase , device=lowercase )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowercase , generator=lowercase , device=lowercase ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowercase )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowercase , device=lowercase )
return dummy_input
def A ( self : Any ):
'''simple docstring'''
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Dict , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
unet_block.to(lowercase )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowercase ).to(lowercase )
assert torch_all_close(output_slice.flatten() , lowercase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def A ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
model.to(lowercase )
model.train()
_snake_case = model(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
_snake_case = torch.device(lowercase )
_snake_case = randn_tensor(output.shape , device=lowercase )
_snake_case = torch.nn.functional.mse_loss(lowercase , lowercase )
loss.backward() | 282 | 1 |
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : str=None , lowercase : Optional[int]=None ):
'''simple docstring'''
_snake_case = list(poly_a or [0] )[:]
_snake_case = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_snake_case = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_snake_case = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_snake_case = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_snake_case = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_snake_case = self.__multiply()
def A ( self : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(lowercase ) <= 1:
return dft[0]
#
_snake_case = self.c_max_length // 2
while next_ncol > 0:
_snake_case = [[] for i in range(lowercase )]
_snake_case = self.root**next_ncol
# First half of next step
_snake_case = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowercase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_snake_case = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowercase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_snake_case = new_dft
_snake_case = next_ncol // 2
return dft[0]
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.__dft('A' )
_snake_case = self.__dft('B' )
_snake_case = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_snake_case = 2
while next_ncol <= self.c_max_length:
_snake_case = [[] for i in range(lowercase )]
_snake_case = self.root ** (next_ncol // 2)
_snake_case = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_snake_case = new_inverse_c
next_ncol *= 2
# Unpack
_snake_case = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'A = ' + ' + '.join(
f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
_snake_case = 'B = ' + ' + '.join(
f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
_snake_case = 'A*B = ' + ' + '.join(
f'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 |
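# An independent cross-check of FFT-based polynomial multiplication using
# numpy rather than the class above: (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2, so
# the first three convolution coefficients should be [3, 10, 8].
import numpy as np

a, b = [1, 2], [3, 4]
size = 4  # next power of two >= len(a) + len(b) - 1
product = np.fft.ifft(np.fft.fft(a, size) * np.fft.fft(b, size)).real.round(8)
assert list(product[:3]) == [3.0, 10.0, 8.0]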
_lowerCamelCase : int = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : List[str] = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def a_ ( __lowercase : int , __lowercase : int , __lowercase : int ) -> str:
assert len(str(__lowercase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 and 12"
assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
_snake_case = year // 100
_snake_case = (5 * (century % 4) + 2) % 7
_snake_case = year % 100
_snake_case = centurian % 12
_snake_case = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_snake_case = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_snake_case = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
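# A worked sanity check of the anchors above for the year 2000 (a leap year,
# since 2000 % 400 == 0): the century anchor is (5 * (20 % 4) + 2) % 7 == 2
# (Tuesday) and the remaining doomsday terms are all 0, so the year's
# doomsday is Tuesday. Jan 4, 2000 was indeed a Tuesday, which places
# Jan 1, 2000 at (2 + 1 - 4) % 7 == 6, i.e. Saturday.
assert (5 * ((2000 // 100) % 4) + 2) % 7 == 2
assert (2 + 1 - 4) % 7 == 6  # index 6 is 'Saturday' in WEEK_DAY_NAMES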
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : Optional[Any] , lowercase : Any=3 , lowercase : Dict=32 , lowercase : List[str]=3 , lowercase : str=10 , lowercase : List[Any]=[10, 20, 30, 40] , lowercase : List[str]=[1, 1, 2, 1] , lowercase : Any=True , lowercase : str=True , lowercase : Optional[int]="relu" , lowercase : Tuple=3 , lowercase : int=None , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = embeddings_size
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_act
_snake_case = num_labels
_snake_case = scope
_snake_case = len(lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : List[Any] ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def A ( self : Union[str, Any] , lowercase : List[str] , lowercase : str , lowercase : str ):
'''simple docstring'''
_snake_case = TFRegNetModel(config=lowercase )
_snake_case = model(lowercase , training=lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A ( self : Optional[int] , lowercase : Dict , lowercase : Any , lowercase : Dict ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = TFRegNetForImageClassification(lowercase )
_snake_case = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase : List[Any] = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : int = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Any = False
_UpperCAmelCase : Any = False
def A ( self : str ):
'''simple docstring'''
_snake_case = TFRegNetModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def A ( self : Dict ):
'''simple docstring'''
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def A ( self : str ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def A ( self : Dict ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : Tuple ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : str ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(lowercase : str , lowercase : Dict , lowercase : List[str] ):
_snake_case = model_class(lowercase )
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_snake_case = layer_type
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowercase : Tuple , lowercase : List[str] , lowercase : Any , lowercase : Union[str, Any]={} ):
_snake_case = model(lowercase , return_dict=lowercase , **lowercase )
_snake_case = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase : List[Any] , lowercase : int ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowercase , lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
f''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = self._prepare_for_class(lowercase , lowercase )
_snake_case = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
_snake_case = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
_snake_case = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
_snake_case = self._prepare_for_class(lowercase , lowercase )
_snake_case = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
_snake_case = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
_snake_case = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def A ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFRegNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def a_ ( ) -> List[Any]:
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A ( self : Tuple ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
_snake_case = model(**lowercase , training=lowercase )
# verify the logits
_snake_case = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase )
_snake_case = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 ) | 282 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow

torch.backends.cuda.matmul.allow_tf32 = False

class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule='linear', clip_sample=True)
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule='linear', clip_sample=True)
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # the last DDPM and DDIM batches must yield the same noised inputs and predictions
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5)) | 282 | 1 |
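# Editor's aside (not part of the row above): the final assertions hold because DDPM
# and DDIM implement the same forward (noising) process for matching beta schedules,
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# so add_noise is interchangeable between the two schedulers. A runnable sketch of
# just that equivalence, using only the scheduler API exercised above:
import torch
from diffusers import DDIMScheduler, DDPMScheduler

ddpm = DDPMScheduler(num_train_timesteps=1_000, beta_schedule='linear')
ddim = DDIMScheduler(num_train_timesteps=1_000, beta_schedule='linear')
sample, eps = torch.randn(1, 3, 32, 32), torch.randn(1, 3, 32, 32)
t = torch.tensor([10])
assert torch.allclose(ddpm.add_noise(sample, eps, t), ddim.add_noise(sample, eps, t), atol=1e-5)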
def mean_absolute_deviation(nums: list[int]) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('List is empty')
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)

if __name__ == "__main__":
    import doctest
    doctest.testmod() | 282 |
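# Editor's worked example for mean_absolute_deviation above (illustrative): for
# [1, 2, 3, 4] the mean is 2.5, the absolute deviations are [1.5, 0.5, 0.5, 1.5],
# and their mean is 1.0.
assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0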
import numpy as np

def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))

if __name__ == "__main__":
    import doctest
    doctest.testmod() | 282 | 1 |
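# Editor's usage sketch for sigmoid above: it maps the reals into (0, 1), with
# sigmoid(0) = 0.5 (printed values are approximate, for illustration).
print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # approx. [0.2689 0.5 0.7311]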
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 282 |
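# Editor's aside: the init file above follows the standard transformers lazy-import
# layout -- heavy submodules load only on first attribute access. The same idea in
# miniature, independent of transformers internals (illustrative names):
import importlib

def lazy_import(name):
    module = None
    def get():
        nonlocal module
        if module is None:  # import deferred until first use
            module = importlib.import_module(name)
        return module
    return get

get_json = lazy_import('json')
print(get_json().dumps({'lazy': True}))  # 'json' is imported here, not at definition time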
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) | 282 | 1 |
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res

def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)

def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))

def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()

if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark() | 282 |
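# Editor's worked check for the three digit-sum variants above (illustrative):
# 262_144 -> 2 + 6 + 2 + 1 + 4 + 4 = 19, and all implementations must agree.
assert sum_of_digits(262_144) == sum_of_digits_recursion(262_144) == sum_of_digits_compact(262_144) == 19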
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
    'tokenizer_file': {
        'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token='<pad>', eos_token='</s>', unk_token='<unk>', mask_token='<mask_2>', mask_token_sent='<mask_1>', additional_special_tokens=None, offset=103, **kwargs):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(list)}, but is'
                    f' {type(additional_special_tokens)}')
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                f' {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}')
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,) | 282 | 1 |
def solution(n: int = 1_000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))

if __name__ == "__main__":
    print(solution()) | 282 |
from collections.abc import Sequence

def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))

def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result

if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x)) | 282 | 1 |
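# Editor's note: horner() evaluates a degree-n polynomial with n multiplications by
# nesting, result = (...(c_n * x + c_{n-1}) * x + ...) * x + c_0, while the naive
# evaluate_poly() recomputes x**i per term. Worked check for the values above
# (illustrative): 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 500 + 9300 + 70000 = 79800.
import math
assert math.isclose(horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0), 79_800.0)
assert math.isclose(evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0), 79_800.0)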
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : str , lowercase : List[str]=13 , lowercase : Any=7 , lowercase : Dict=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : Any=True , lowercase : Tuple=99 , lowercase : str=24 , lowercase : str=2 , lowercase : Any=6 , lowercase : Dict=37 , lowercase : List[str]="gelu" , lowercase : Dict=0.1 , lowercase : Tuple=0.1 , lowercase : Optional[Any]=512 , lowercase : List[Any]=16 , lowercase : str=2 , lowercase : int=0.02 , lowercase : List[Any]=3 , lowercase : List[Any]=None , lowercase : int=1_000 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = scope
_snake_case = range_bbox
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_snake_case = bbox[i, j, 3]
_snake_case = bbox[i, j, 1]
_snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_snake_case = bbox[i, j, 2]
_snake_case = bbox[i, j, 0]
_snake_case = t
_snake_case = None
if self.use_input_mask:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A ( self : List[str] ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A ( self : str , lowercase : Tuple , lowercase : Tuple , lowercase : str , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : str , ):
'''simple docstring'''
_snake_case = LiltModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : List[Any] , lowercase : int , lowercase : int , lowercase : Any , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Optional[int] , ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = LiltForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] , lowercase : int , lowercase : int , ):
'''simple docstring'''
_snake_case = LiltForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : List[str] = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Union[str, Any] = False
def A ( self : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple ):
'''simple docstring'''
return True
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = LiltModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case = type
self.model_tester.create_and_check_model(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = LiltModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3)) | 282 | 1 |
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]

def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors

def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors

def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)

def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()

if __name__ == "__main__":
    import doctest
    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors) | 282 |
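# Editor's sanity check for the snowflake above (illustrative): every iteration
# replaces each segment with 4 shorter ones, so after n steps there are 3 * 4**n
# segments and 3 * 4**n + 1 points (the start point repeats at the end).
for n in range(4):
    assert len(iterate(INITIAL_VECTORS, n)) == 3 * 4**n + 1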
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt

def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum

def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float('-inf'), -1
    right_sum, max_right = float('-inf'), -1
    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)

def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start

def plot_runtimes() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken')
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, '\t\t', runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel('Number of Inputs')
    plt.ylabel('Time taken in seconds')
    plt.show()

if __name__ == "__main__":
    from doctest import testmod
    testmod() | 282 | 1 |
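# Editor's usage sketch for max_subarray above (illustrative): in the classic array
# [-2, 1, -3, 4, -1, 2, 1, -5, 4], the maximum subarray is [4, -1, 2, 1], spanning
# indices 3..6 with sum 6.
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
assert max_subarray(nums, 0, len(nums) - 1) == (3, 6, 6)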
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
_UpperCAmelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : np.ndarray
_UpperCAmelCase : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker | 282 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'], model_result['ss']):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result)
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Any ):
'''simple docstring'''
_snake_case = 'sgugger/tiny-distilbert-classification'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , only_pretrain_model=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , torchscript=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , fpaa=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
# set architectures equal to `None`
_snake_case = None
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def A ( self : str ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tinier_bart'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = 'sshleifer/tinier_bart'
_snake_case = AutoConfig.from_pretrained(lowercase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , save_to_csv=lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowercase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowercase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowercase , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowercase , 'env.csv' ) , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase , 'env.csv' ) ).exists() )
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowercase : Optional[Any] ):
self.assertTrue(hasattr(lowercase , 'sequential' ) )
self.assertTrue(hasattr(lowercase , 'cumulative' ) )
self.assertTrue(hasattr(lowercase , 'current' ) )
self.assertTrue(hasattr(lowercase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase , 'log.txt' ) , log_print=lowercase , trace_memory_line_by_line=lowercase , multi_process=lowercase , )
_snake_case = PyTorchBenchmark(lowercase )
_snake_case = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase , 'log.txt' ) ).exists() ) | 282 | 1 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ["pixel_values"]
def __init__( self : str , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowercase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowercase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = size if size is not None else {'shortest_edge': 224}
_snake_case = get_size_dict(lowercase , default_to_square=lowercase )
_snake_case = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_snake_case = get_size_dict(lowercase , param_name='crop_size' )
_snake_case = do_resize
_snake_case = size
_snake_case = resample
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_snake_case = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A ( self : List[str] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
_snake_case = get_size_dict(lowercase , default_to_square=lowercase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_snake_case = int((256 / 224) * size['shortest_edge'] )
_snake_case = get_resize_output_image_size(lowercase , size=lowercase , default_to_square=lowercase )
_snake_case = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
lowercase , size=(size_dict['height'], size_dict['width']) , resample=lowercase , data_format=lowercase , **lowercase )
def A ( self : List[str] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Dict , ):
'''simple docstring'''
_snake_case = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(lowercase , size=(size['height'], size['width']) , data_format=lowercase , **lowercase )
def A ( self : str , lowercase : np.ndarray , lowercase : Union[int, float] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Dict , ):
'''simple docstring'''
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A ( self : Optional[Any] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Optional[Any] , ):
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def A ( self : List[Any] , lowercase : ImageInput , lowercase : Optional[bool] = None , lowercase : Optional[Dict[str, int]] = None , lowercase : PILImageResampling = None , lowercase : Optional[bool] = None , lowercase : Optional[Dict[str, int]] = None , lowercase : Optional[bool] = None , lowercase : Optional[float] = None , lowercase : Optional[bool] = None , lowercase : Optional[Union[float, Iterable[float]]] = None , lowercase : Optional[Union[float, Iterable[float]]] = None , lowercase : Optional[TensorType] = None , lowercase : ChannelDimension = ChannelDimension.FIRST , **lowercase : Tuple , ):
'''simple docstring'''
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = resample if resample is not None else self.resample
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(lowercase , default_to_square=lowercase )
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(lowercase , param_name='crop_size' )
_snake_case = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(lowercase ) for image in images]
if do_resize:
_snake_case = [self.resize(lowercase , lowercase , lowercase ) for image in images]
if do_center_crop:
_snake_case = [self.center_crop(lowercase , lowercase ) for image in images]
if do_rescale:
_snake_case = [self.rescale(lowercase , lowercase ) for image in images]
if do_normalize:
_snake_case = [self.normalize(lowercase , lowercase , lowercase ) for image in images]
_snake_case = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_snake_case = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 282 |
from __future__ import annotations

from typing import Any

class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f'Matrix consist of {self.row} rows and {self.column} columns\n'
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'%{max_element_length}s'

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another)})'
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))

# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}')
        print(f'v is {v}')
        print(f'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}')

    def test2() -> None:
        import doctest
        doctest.testmod()

    test1() | 282 | 1 |
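# Editor's note on the class above: Matrix.sherman_morrison(u, v) assumes `self`
# already holds A^(-1) and applies the Sherman-Morrison identity
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# which is why numerator_factor == 0 signals that the rank-1 update is singular.
# With ainv = I, as in test1, the call returns (I + u v^T)^(-1) directly.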
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2

def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')

if __name__ == "__main__":
    import doctest
    doctest.testmod() | 282 |
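# Editor's worked example for coulombs_law above (illustrative): with exactly one
# unknown passed as 0, the function solves F = k * |q1 * q2| / d**2 for it; two 1 C
# charges 1 m apart give F = 8.988e9 N.
assert coulombs_law(force=0, charge1=1, charge2=1, distance=1) == {'force': 8.988e9}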
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : Dict = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs) | 282 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f'{len(dest_layers)} != {len(layers_to_copy)}'
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
_lowerCamelCase : str = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a_ ( __lowercase : Optional[Any] , __lowercase : List[Any] ) -> int:
try:
_snake_case = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
f''' {n_student}''' )
return list(range(__lowercase ) )
def a_ ( __lowercase : Dict , __lowercase : Optional[Any] ) -> List[int]:
if n_student > n_teacher:
raise ValueError(f'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
return list(range(__lowercase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a_ ( __lowercase : Union[str, PreTrainedModel] , __lowercase : Union[str, Path] = "student" , __lowercase : Union[int, None] = None , __lowercase : Union[int, None] = None , __lowercase : List[str]=False , __lowercase : Tuple=None , __lowercase : str=None , **__lowercase : Any , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
_snake_case = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(__lowercase , __lowercase ):
AutoTokenizer.from_pretrained(__lowercase ).save_pretrained(__lowercase ) # purely for convenience
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ).eval()
else:
assert isinstance(__lowercase , __lowercase ), f'''teacher must be a model or string got type {type(__lowercase )}'''
_snake_case = teacher.config.to_diff_dict()
try:
_snake_case , _snake_case = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_snake_case = teacher_e
if d is None:
_snake_case = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
_snake_case , _snake_case = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_snake_case , _snake_case = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_snake_case = teacher_e
if d is None:
_snake_case = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__lowercase )
# Copy weights
_snake_case = teacher.config_class(**__lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_config(__lowercase )
# Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
_snake_case = student.load_state_dict(teacher.state_dict() , strict=__lowercase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_snake_case , _snake_case = list(range(__lowercase ) ), list(range(__lowercase ) )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
f''' {save_path}''' )
student.save_pretrained(__lowercase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_snake_case = pick_layers_to_copy(__lowercase , __lowercase )
if d_layers_to_copy is None:
_snake_case = pick_layers_to_copy(__lowercase , __lowercase )
try:
if hasattr(
__lowercase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowercase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowercase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowercase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowercase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __lowercase )
copy_layers(teacher.decoder.block , student.decoder.block , __lowercase )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
_snake_case = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(__lowercase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers) | 282 |
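# Layer-selection sketch for the script above (de-obfuscated helper name assumed to be
# pick_layers_to_copy(n_student, n_teacher), reading from LAYERS_TO_COPY):
#   pick_layers_to_copy(2, 12)  -> [0, 6]
#   pick_layers_to_copy(4, 16)  -> [0, 5, 10, 15]
# i.e. the student keeps a small set of roughly evenly spaced teacher layers,
# per the hardcoded maps defined above.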
def a_ ( __lowercase : str ) -> int:
_snake_case = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
_snake_case = hex_num[0] == '-'
if is_negative:
_snake_case = hex_num[1:]
try:
_snake_case = int(__lowercase , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
_snake_case = ''
while int_num > 0:
_snake_case = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
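# Usage sketch for the converter above (hypothetical de-obfuscated name hex_to_bin;
# the result is an int whose decimal digits spell the binary representation):
#   hex_to_bin('AC')    -> 10101100         # 0xAC == 172 == 0b10101100
#   hex_to_bin('-fff')  -> -111111111111    # sign is preserved
#   hex_to_bin(' 1 ')   -> 1                # surrounding whitespace is stripped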
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCamelCase : Optional[Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 282 |
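# Minimal sketch of the lazy-import pattern used above (illustrative, not the actual
# transformers implementation): the real _LazyModule replaces the package in
# sys.modules and only imports a submodule on first attribute access.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(f'module {self.__name__} has no attribute {name}')
        submodule = importlib.import_module('.' + self._symbol_to_module[name], self.__name__)
        return getattr(submodule, name)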
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "longformer"
def __init__( self : Optional[Any] , lowercase : Union[List[int], int] = 512 , lowercase : int = 2 , lowercase : int = 1 , lowercase : int = 0 , lowercase : int = 2 , lowercase : int = 30_522 , lowercase : int = 768 , lowercase : int = 12 , lowercase : int = 12 , lowercase : int = 3_072 , lowercase : str = "gelu" , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : int = 512 , lowercase : int = 2 , lowercase : float = 0.02 , lowercase : float = 1E-12 , lowercase : bool = False , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase , **lowercase )
_snake_case = attention_window
_snake_case = sep_token_id
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = onnx_export
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : int , lowercase : "PretrainedConfig" , lowercase : str = "default" , lowercase : "List[PatchingSpec]" = None ):
'''simple docstring'''
super().__init__(lowercase , lowercase , lowercase )
_snake_case = True
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def A ( self : int ):
'''simple docstring'''
_snake_case = super().outputs
if self.task == "default":
_snake_case = {0: 'batch'}
return outputs
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[str] ):
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def A ( self : str , lowercase : "PreTrainedTokenizerBase" , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
_snake_case = super().generate_dummy_inputs(
preprocessor=lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_snake_case = torch.zeros_like(inputs['input_ids'] )
# make every second token global
_snake_case = 1
return inputs | 282 | 1 |
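# Shape sketch for the dummy global attention mask above (illustrative): the two
# obfuscated assignments correspond to zero-initialising the mask and then marking
# every second position as global, e.g.
#   global_attention_mask = torch.zeros_like(inputs['input_ids'])
#   global_attention_mask[:, ::2] = 1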
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase : Union[str, Any] = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_lowerCamelCase : int = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_lowerCamelCase : Union[str, Any] = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def A ( self : Dict , lowercase : List[List[List[str]]] , lowercase : List[List[str]] , lowercase : int = 1 , lowercase : int = 4 , ):
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowercase , hypotheses=lowercase , min_len=lowercase , max_len=lowercase )
} | 282 |
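# Minimal single-pair sketch of the GLEU described in the docstring above: collect all
# n-grams of order min_len..max_len, then score = min(n-gram precision, n-gram recall).
# Illustrative only -- the metric class itself delegates to nltk's corpus_gleu.
from collections import Counter


def _ngram_counts(tokens, min_len=1, max_len=4):
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts


def sentence_gleu(hypothesis, reference, min_len=1, max_len=4):
    hyp = _ngram_counts(hypothesis, min_len, max_len)
    ref = _ngram_counts(reference, min_len, max_len)
    matching = sum((hyp & ref).values())  # clipped n-gram overlap
    return min(matching / sum(hyp.values()), matching / sum(ref.values()))


assert sentence_gleu('the cat sat'.split(), 'the cat sat'.split()) == 1.0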
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , lowercase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(lowercase )
def A ( self : Optional[int] , lowercase : torch.FloatTensor , lowercase : Union[torch.Tensor, float, int] , lowercase : torch.Tensor , lowercase : List[torch.tensor] , lowercase : List[float] , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[Dict[str, Any]] = None , lowercase : bool = False , lowercase : bool = True , ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowercase , lowercase , self.nets ) ):
_snake_case , _snake_case = controlnet(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# merge samples
if i == 0:
_snake_case , _snake_case = down_samples, mid_sample
else:
_snake_case = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase , lowercase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def A ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = True , lowercase : Callable = None , lowercase : bool = False , lowercase : Optional[str] = None , ):
'''simple docstring'''
_snake_case = 0
_snake_case = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase , is_main_process=lowercase , save_function=lowercase , safe_serialization=lowercase , variant=lowercase , )
idx += 1
_snake_case = model_path_to_save + f'''_{idx}'''
@classmethod
def A ( cls : Any , lowercase : Optional[Union[str, os.PathLike]] , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = 0
_snake_case = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case = pretrained_model_path
while os.path.isdir(lowercase ):
_snake_case = ControlNetModel.from_pretrained(lowercase , **lowercase )
controlnets.append(lowercase )
idx += 1
_snake_case = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(lowercase )} controlnets loaded from {pretrained_model_path}.''' )
if len(lowercase ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(lowercase )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(lowercase ) | 282 | 1 |
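# Save/load layout sketch for the multi-controlnet container above: with two nets,
# save_pretrained('./mydirectory') writes
#   ./mydirectory      <- nets[0]
#   ./mydirectory_1    <- nets[1]
# and from_pretrained('./mydirectory') keeps loading the '_1', '_2', ... suffixed
# directories until one is missing.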
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def a_ ( __lowercase : Optional[Any] , __lowercase : str , __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : Any=True , __lowercase : Union[str, Any]="pt" ) -> Dict:
_snake_case = {'add_prefix_space': True} if isinstance(__lowercase , __lowercase ) and not line.startswith(' ' ) else {}
_snake_case = padding_side
return tokenizer(
[line] , max_length=__lowercase , padding='max_length' if pad_to_max_length else None , truncation=__lowercase , return_tensors=__lowercase , add_special_tokens=__lowercase , **__lowercase , )
def a_ ( __lowercase : Tuple , __lowercase : Dict , __lowercase : Tuple=None , ) -> List[str]:
_snake_case = input_ids.ne(__lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : Dict , lowercase : Any , lowercase : Optional[Any] , lowercase : Any , lowercase : List[Any]="train" , lowercase : Dict=None , lowercase : Dict=None , lowercase : Dict=None , lowercase : Optional[Any]="" , ):
'''simple docstring'''
super().__init__()
_snake_case = Path(lowercase ).joinpath(type_path + '.source' )
_snake_case = Path(lowercase ).joinpath(type_path + '.target' )
_snake_case = self.get_char_lens(self.src_file )
_snake_case = max_source_length
_snake_case = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
_snake_case = tokenizer
_snake_case = prefix
if n_obs is not None:
_snake_case = self.src_lens[:n_obs]
_snake_case = src_lang
_snake_case = tgt_lang
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = index + 1 # linecache starts at 1
_snake_case = self.prefix + linecache.getline(str(self.src_file ) , lowercase ).rstrip('\n' )
_snake_case = linecache.getline(str(self.tgt_file ) , lowercase ).rstrip('\n' )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowercase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_snake_case = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowercase ) else self.tokenizer
)
_snake_case = self.tokenizer.generator if isinstance(self.tokenizer , lowercase ) else self.tokenizer
_snake_case = encode_line(lowercase , lowercase , self.max_source_length , 'right' )
_snake_case = encode_line(lowercase , lowercase , self.max_target_length , 'right' )
_snake_case = source_inputs['input_ids'].squeeze()
_snake_case = target_inputs['input_ids'].squeeze()
_snake_case = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def A ( lowercase : str ):
'''simple docstring'''
return [len(lowercase ) for x in Path(lowercase ).open().readlines()]
def A ( self : Any , lowercase : List[str] ):
'''simple docstring'''
_snake_case = torch.stack([x['input_ids'] for x in batch] )
_snake_case = torch.stack([x['attention_mask'] for x in batch] )
_snake_case = torch.stack([x['decoder_input_ids'] for x in batch] )
_snake_case = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowercase )
else self.tokenizer.pad_token_id
)
_snake_case = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowercase )
else self.tokenizer.pad_token_id
)
_snake_case = trim_batch(lowercase , lowercase )
_snake_case , _snake_case = trim_batch(lowercase , lowercase , attention_mask=lowercase )
_snake_case = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
_lowerCamelCase : Union[str, Any] = getLogger(__name__)
def a_ ( __lowercase : List[List] ) -> Tuple:
return list(itertools.chain.from_iterable(__lowercase ) )
def a_ ( __lowercase : str ) -> None:
_snake_case = get_git_info()
save_json(__lowercase , os.path.join(__lowercase , 'git_log.json' ) )
def a_ ( __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : Optional[int]=4 , **__lowercase : int ) -> List[Any]:
with open(__lowercase , 'w' ) as f:
json.dump(__lowercase , __lowercase , indent=__lowercase , **__lowercase )
def a_ ( __lowercase : Optional[Any] ) -> int:
with open(__lowercase ) as f:
return json.load(__lowercase )
def a_ ( ) -> int:
_snake_case = git.Repo(search_parent_directories=__lowercase )
_snake_case = {
'repo_id': str(__lowercase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def a_ ( __lowercase : Callable , __lowercase : Iterable ) -> List:
return list(map(__lowercase , __lowercase ) )
def a_ ( __lowercase : str , __lowercase : Optional[int] ) -> Tuple:
with open(__lowercase , 'wb' ) as f:
return pickle.dump(__lowercase , __lowercase )
def a_ ( __lowercase : Optional[Any] ) -> Any:
def remove_articles(__lowercase : Tuple ):
return re.sub(r'\b(a|an|the)\b' , ' ' , __lowercase )
def white_space_fix(__lowercase : str ):
return " ".join(text.split() )
def remove_punc(__lowercase : str ):
_snake_case = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowercase : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowercase ) ) ) )
def a_ ( __lowercase : Optional[int] , __lowercase : Optional[Any] ) -> Any:
_snake_case = normalize_answer(__lowercase ).split()
_snake_case = normalize_answer(__lowercase ).split()
_snake_case = Counter(__lowercase ) & Counter(__lowercase )
_snake_case = sum(common.values() )
if num_same == 0:
return 0
_snake_case = 1.0 * num_same / len(__lowercase )
_snake_case = 1.0 * num_same / len(__lowercase )
_snake_case = (2 * precision * recall) / (precision + recall)
return fa
def a_ ( __lowercase : Any , __lowercase : int ) -> List[str]:
return normalize_answer(__lowercase ) == normalize_answer(__lowercase )
def a_ ( __lowercase : List[str] , __lowercase : List[str] ) -> Dict:
assert len(__lowercase ) == len(__lowercase )
_snake_case = 0
for hypo, pred in zip(__lowercase , __lowercase ):
em += exact_match_score(__lowercase , __lowercase )
if len(__lowercase ) > 0:
em /= len(__lowercase )
return {"em": em}
def a_ ( __lowercase : str ) -> Optional[int]:
return model_prefix.startswith('rag' )
def a_ ( __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Optional[int] ) -> List[Any]:
_snake_case = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_snake_case = 'dropout_rate'
for p in extra_params:
if getattr(__lowercase , __lowercase , __lowercase ):
if not hasattr(__lowercase , __lowercase ) and not hasattr(__lowercase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(__lowercase ) )
delattr(__lowercase , __lowercase )
continue
_snake_case = p if hasattr(__lowercase , __lowercase ) else equivalent_param[p]
setattr(__lowercase , __lowercase , getattr(__lowercase , __lowercase ) )
delattr(__lowercase , __lowercase )
return hparams, config | 282 |
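# Worked example for the QA metric helpers above (hypothetical de-obfuscated names
# normalize_answer, exact_match_score and f1_score):
#   normalize_answer('The Eiffel Tower!')                    -> 'eiffel tower'
#   exact_match_score('the Eiffel Tower', 'eiffel tower')    -> True
#   f1_score('eiffel tower paris', 'eiffel tower')
#     shared tokens = 2, precision = 2/3, recall = 2/2, so F1 = 2*(2/3*1)/(2/3 + 1) = 0.8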
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : List[str] , lowercase : list[int] ):
'''simple docstring'''
_snake_case = len(lowercase )
_snake_case = [0] * len_array
if len_array > 0:
_snake_case = array[0]
for i in range(1 , lowercase ):
_snake_case = self.prefix_sum[i - 1] + array[i]
def A ( self : Optional[Any] , lowercase : int , lowercase : int ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def A ( self : Union[str, Any] , lowercase : int ):
'''simple docstring'''
_snake_case = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowercase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
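# Usage sketch for the prefix-sum class above (illustrative class name PrefixSum):
#   ps = PrefixSum([1, 2, 3])      # prefix_sum becomes [1, 3, 6]
#   ps.get_sum(1, 2)    -> 5       # 2 + 3
#   ps.contains_sum(5)  -> True    # subarray [2, 3]
#   ps.contains_sum(4)  -> False   # no contiguous subarray of [1, 2, 3] sums to 4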
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_lowerCamelCase : str = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_lowerCamelCase : Dict = {'''facebook/blenderbot-3B''': 128}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : str = ["input_ids", "attention_mask"]
_UpperCAmelCase : Dict = BlenderbotTokenizer
def __init__( self : List[Any] , lowercase : int=None , lowercase : Any=None , lowercase : Tuple=None , lowercase : Dict="replace" , lowercase : Optional[int]="<s>" , lowercase : Optional[Any]="</s>" , lowercase : Any="</s>" , lowercase : List[Any]="<s>" , lowercase : List[Any]="<unk>" , lowercase : Any="<pad>" , lowercase : List[str]="<mask>" , lowercase : List[str]=False , lowercase : Optional[int]=True , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
_snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase ) != add_prefix_space:
_snake_case = getattr(lowercase , pre_tok_state.pop('type' ) )
_snake_case = add_prefix_space
_snake_case = pre_tok_class(**lowercase )
_snake_case = add_prefix_space
_snake_case = 'post_processor'
_snake_case = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
_snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_snake_case = tuple(state['sep'] )
if "cls" in state:
_snake_case = tuple(state['cls'] )
_snake_case = False
if state.get('add_prefix_space' , lowercase ) != add_prefix_space:
_snake_case = add_prefix_space
_snake_case = True
if state.get('trim_offsets' , lowercase ) != trim_offsets:
_snake_case = trim_offsets
_snake_case = True
if changes_to_apply:
_snake_case = getattr(lowercase , state.pop('type' ) )
_snake_case = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def A ( self : Any ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def A ( self : Optional[Any] , lowercase : int ):
'''simple docstring'''
_snake_case = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
_snake_case = value
def A ( self : int , *lowercase : Any , **lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = kwargs.get('is_split_into_words' , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase , **lowercase )
def A ( self : Dict , *lowercase : List[Any] , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = kwargs.get('is_split_into_words' , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase , **lowercase )
def A ( self : List[Any] , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
_snake_case = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def A ( self : Optional[Any] , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A ( self : List[Any] , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def A ( self : Tuple , lowercase : "Conversation" ):
'''simple docstring'''
_snake_case = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(' ' + text )
else:
# Generated responses already contain the space prefix.
inputs.append(lowercase )
_snake_case = ' '.join(lowercase )
_snake_case = self.encode(lowercase )
if len(lowercase ) > self.model_max_length:
_snake_case = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids | 282 |
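# Flattening sketch for the conversation builder above (illustrative turns): user
# turns get a leading space, generated turns are appended verbatim, and the pieces
# are joined with single spaces before encoding; the encoded ids are then truncated
# from the left to model_max_length.
#   turns: [(True, 'Hello!'), (False, ' Hi there.'), (True, 'How are you?')]
#   text:  ' Hello!' + ' ' + ' Hi there.' + ' ' + ' How are you?'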
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int = 16 , lowercase : int = 88 , lowercase : Optional[int] = None , lowercase : int = 1 , lowercase : float = 0.0 , lowercase : int = 32 , lowercase : Optional[int] = None , lowercase : bool = False , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : str = "geglu" , lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase , attention_head_dim=lowercase , in_channels=lowercase , num_layers=lowercase , dropout=lowercase , norm_num_groups=lowercase , cross_attention_dim=lowercase , attention_bias=lowercase , sample_size=lowercase , num_vector_embeds=lowercase , activation_fn=lowercase , num_embeds_ada_norm=lowercase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_snake_case = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_snake_case = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_snake_case = [1, 0]
def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : List[str]=None , lowercase : Tuple=None , lowercase : Dict=None , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = hidden_states
_snake_case = []
_snake_case = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_snake_case = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_snake_case = self.transformer_index_for_condition[i]
_snake_case = self.transformers[transformer_index](
lowercase , encoder_hidden_states=lowercase , timestep=lowercase , cross_attention_kwargs=lowercase , return_dict=lowercase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_snake_case = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_snake_case = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase ) | 282 | 1 |
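# Shape sketch for the forward pass above: with condition_lengths == [77, 257] and
# transformer_index_for_condition == [1, 0], an encoder_hidden_states tensor of shape
# (batch, 77 + 257, dim) is routed as
#   encoder_hidden_states[:, 0:77]    -> self.transformers[1]
#   encoder_hidden_states[:, 77:334]  -> self.transformers[0]
# and the two residual outputs are blended as
#   mix_ratio * encoded_states[0] + (1 - mix_ratio) * encoded_states[1]
# before the input states are added back.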
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def A ( self : Tuple ):
'''simple docstring'''
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
_snake_case = 'A painting of a squirrel eating a burger'
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = sd_pipe.prepare_inputs(lowercase )
_snake_case = replicate(lowercase )
_snake_case = shard(lowercase )
_snake_case = jax.random.PRNGKey(0 )
_snake_case = jax.random.split(lowercase , jax.device_count() )
_snake_case = sd_pipe(lowercase , lowercase , lowercase , num_inference_steps=25 , jit=lowercase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case = images[0, 253:256, 253:256, -1]
_snake_case = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'stabilityai/stable-diffusion-2'
_snake_case , _snake_case = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase , subfolder='scheduler' )
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
lowercase , scheduler=lowercase , revision='bf16' , dtype=jnp.bfloataa , )
_snake_case = scheduler_params
_snake_case = 'A painting of a squirrel eating a burger'
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = sd_pipe.prepare_inputs(lowercase )
_snake_case = replicate(lowercase )
_snake_case = shard(lowercase )
_snake_case = jax.random.PRNGKey(0 )
_snake_case = jax.random.split(lowercase , jax.device_count() )
_snake_case = sd_pipe(lowercase , lowercase , lowercase , num_inference_steps=25 , jit=lowercase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case = images[0, 253:256, 253:256, -1]
_snake_case = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 | 282 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoTokenizer.from_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = tokenizer('This is me' , return_tensors='pt' )
_snake_case = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_snake_case = model.generate(**lowercase )
_snake_case = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_snake_case = model_reloaded.generate(**lowercase )
self.assertTrue(torch.allclose(lowercase , lowercase ) )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = 'hf-internal-testing/tiny-random-t5'
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase )
_snake_case = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowercase ):
model.save_pretrained(lowercase )
_snake_case = model.reverse_bettertransformer()
model.save_pretrained(lowercase ) | 282 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def a_ ( __lowercase : Iterable[str] , __lowercase : int ) -> Generator[tuple[str, ...], None, None]:
_snake_case = iter(__lowercase )
while True:
_snake_case = tuple(itertools.islice(__lowercase , __lowercase ) )
if not chunk:
return
yield chunk
def a_ ( __lowercase : str ) -> str:
_snake_case = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
_snake_case = ''
if len(__lowercase ) < 2:
return dirty
for i in range(len(__lowercase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(__lowercase ) & 1:
clean += "X"
return clean
def a_ ( __lowercase : str ) -> list[str]:
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
_snake_case = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
_snake_case = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(__lowercase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(__lowercase )
return table
def a_ ( __lowercase : str , __lowercase : str ) -> str:
_snake_case = generate_table(__lowercase )
_snake_case = prepare_input(__lowercase )
_snake_case = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__lowercase , 2 ):
_snake_case , _snake_case = divmod(table.index(__lowercase ) , 5 )
_snake_case , _snake_case = divmod(table.index(__lowercase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def a_ ( __lowercase : str , __lowercase : str ) -> str:
_snake_case = generate_table(__lowercase )
_snake_case = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__lowercase , 2 ):
_snake_case , _snake_case = divmod(table.index(__lowercase ) , 5 )
_snake_case , _snake_case = divmod(table.index(__lowercase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext | 282 |
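# Usage sketch for the cipher above (classic Playfair test vector; de-obfuscated
# function names encode/decode assumed):
#   encode('Hide the gold in the tree stump', 'playfair example')
#     -> 'BMODZBXDNABEKUDMUIXMMOUVIF'
#   decode('BMODZBXDNABEKUDMUIXMMOUVIF', 'playfair example')
#     -> 'HIDETHEGOLDINTHETREXESTUMP'   # an X was inserted to split the double E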
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCamelCase : List[Any] = HfApi()
_lowerCamelCase : Dict = {}
# fmt: off
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
_lowerCamelCase : List[str] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCamelCase : Any = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
_lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
_lowerCamelCase : int = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCamelCase : Union[str, Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCamelCase : int = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCamelCase : int = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'{mod.modelId} has passed successfully!!!') | 282 | 1 |
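# Key-construction sketch for the lookup above (illustrative model id):
#   '_'.join('_'.join('google/ddpm-cifar10-32'.split('/')).split('-'))
#     -> 'google_ddpm_cifar10_32'
# i.e. slashes and dashes in the hub model id are flattened to underscores so the id
# can index the dict of expected output slices.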
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Any = PriorTransformer
_UpperCAmelCase : List[str] = "hidden_states"
@property
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = 4
_snake_case = 8
_snake_case = 7
_snake_case = floats_tensor((batch_size, embedding_dim) ).to(lowercase )
_snake_case = floats_tensor((batch_size, embedding_dim) ).to(lowercase )
_snake_case = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowercase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def A ( self : Union[str, Any] , lowercase : List[Any]=0 ):
'''simple docstring'''
torch.manual_seed(lowercase )
_snake_case = 4
_snake_case = 8
_snake_case = 7
_snake_case = torch.randn((batch_size, embedding_dim) ).to(lowercase )
_snake_case = torch.randn((batch_size, embedding_dim) ).to(lowercase )
_snake_case = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowercase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def A ( self : List[Any] ):
'''simple docstring'''
return (4, 8)
@property
def A ( self : str ):
'''simple docstring'''
return (4, 8)
def A ( self : Any ):
'''simple docstring'''
_snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(lowercase )
_snake_case = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.model_class(**lowercase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
_snake_case = model.to(lowercase )
if hasattr(lowercase , 'set_default_attn_processor' ):
model.set_default_attn_processor()
_snake_case = self.get_dummy_seed_input()
with torch.no_grad():
_snake_case = model(**lowercase )[0]
_snake_case = output[0, :5].flatten().cpu()
print(lowercase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
_snake_case = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1E-2 ) )
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : List[Any] , lowercase : int=1 , lowercase : Optional[Any]=768 , lowercase : Union[str, Any]=77 , lowercase : Optional[int]=0 ):
'''simple docstring'''
torch.manual_seed(lowercase )
_snake_case = batch_size
_snake_case = embedding_dim
_snake_case = num_embeddings
_snake_case = torch.randn((batch_size, embedding_dim) ).to(lowercase )
_snake_case = torch.randn((batch_size, embedding_dim) ).to(lowercase )
_snake_case = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowercase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def A ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def A ( self : int , lowercase : Optional[Any] , lowercase : Tuple ):
'''simple docstring'''
_snake_case = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(lowercase )
_snake_case = self.get_dummy_seed_input(seed=lowercase )
with torch.no_grad():
_snake_case = model(**lowercase )[0]
assert list(sample.shape ) == [1, 768]
_snake_case = sample[0, :8].flatten().cpu()
print(lowercase )
_snake_case = torch.tensor(lowercase )
assert torch_all_close(lowercase , lowercase , atol=1E-3 ) | 282 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase , 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowercase , 'depth_multiplier' ) )
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : List[str] , lowercase : Dict=13 , lowercase : Optional[int]=3 , lowercase : Any=32 , lowercase : Any=0.25 , lowercase : Union[str, Any]=8 , lowercase : List[Any]=8 , lowercase : List[Any]=6 , lowercase : Dict=32 , lowercase : Dict=True , lowercase : Optional[Any]=True , lowercase : Tuple=True , lowercase : Tuple="relu6" , lowercase : List[Any]=1_280 , lowercase : Optional[Any]=0.1 , lowercase : int=0.02 , lowercase : Optional[Any]=True , lowercase : List[str]=True , lowercase : List[str]=10 , lowercase : Optional[Any]=None , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = depth_multiplier
_snake_case = depth_divisible_by
_snake_case = min_depth
_snake_case = expand_ratio
_snake_case = tf_padding
_snake_case = output_stride
_snake_case = first_layer_is_expansion
_snake_case = finegrained_output
_snake_case = hidden_act
_snake_case = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
_snake_case = classifier_dropout_prob
_snake_case = use_labels
_snake_case = is_training
_snake_case = num_labels
_snake_case = initializer_range
_snake_case = scope
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def A ( self : str ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A ( self : Optional[Any] , lowercase : str , lowercase : List[str] , lowercase : str , lowercase : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def A ( self : List[Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForImageClassification(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , lowercase : int , lowercase : Dict , lowercase : int , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForSemanticSegmentation(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A ( self : str ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : str = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase : str = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Union[str, Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = MobileNetVaModelTester(self )
_snake_case = MobileNetVaConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def A ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def A ( self : Any ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : str ):
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.hidden_states
_snake_case = 16
self.assertEqual(len(lowercase ) , lowercase )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase )
@slow
def A ( self : List[Any] ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = MobileNetVaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def a_ ( ) -> Union[str, Any]:
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A ( self : Optional[Any] ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(lowercase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
# verify the logits
_snake_case = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowercase )
_snake_case = torch.tensor([0.2445, -1.1993, 0.1905] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
@slow
def A ( self : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = model.to(lowercase )
_snake_case = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
_snake_case = outputs.logits
# verify the logits
_snake_case = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , lowercase )
_snake_case = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase , atol=1E-4 ) ) | 282 | 1 |
from collections.abc import Sequence
from queue import Queue
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : Tuple , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : Optional[Any]=None , lowercase : Dict=None ):
'''simple docstring'''
_snake_case = start
_snake_case = end
_snake_case = val
_snake_case = (start + end) // 2
_snake_case = left
_snake_case = right
def __repr__( self : str ):
'''simple docstring'''
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : List[str] , lowercase : Sequence , lowercase : Dict ):
'''simple docstring'''
_snake_case = collection
_snake_case = function
if self.collection:
_snake_case = self._build_tree(0 , len(lowercase ) - 1 )
def A ( self : str , lowercase : List[str] , lowercase : Dict ):
'''simple docstring'''
self._update_tree(self.root , lowercase , lowercase )
def A ( self : List[Any] , lowercase : Any , lowercase : int ):
'''simple docstring'''
return self._query_range(self.root , lowercase , lowercase )
def A ( self : Any , lowercase : List[Any] , lowercase : Dict ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(lowercase , lowercase , self.collection[start] )
_snake_case = (start + end) // 2
_snake_case = self._build_tree(lowercase , lowercase )
_snake_case = self._build_tree(mid + 1 , lowercase )
return SegmentTreeNode(lowercase , lowercase , self.fn(left.val , right.val ) , lowercase , lowercase )
def A ( self : Optional[int] , lowercase : Tuple , lowercase : List[Any] , lowercase : Optional[Any] ):
'''simple docstring'''
if node.start == i and node.end == i:
_snake_case = val
return
if i <= node.mid:
self._update_tree(node.left , lowercase , lowercase )
else:
self._update_tree(node.right , lowercase , lowercase )
_snake_case = self.fn(node.left.val , node.right.val )
def A ( self : Dict , lowercase : Optional[Any] , lowercase : Dict , lowercase : Union[str, Any] ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , lowercase , lowercase )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , lowercase , node.mid ) , self._query_range(node.right , node.mid + 1 , lowercase ) , )
else:
# range in right child tree
return self._query_range(node.right , lowercase , lowercase )
def A ( self : List[str] ):
'''simple docstring'''
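        # breadth-first traversal: yield nodes level by level via a FIFO queue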
if self.root is not None:
_snake_case = Queue()
queue.put(self.root )
while not queue.empty():
_snake_case = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
_lowerCamelCase : Tuple = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 282 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( __lowercase : Dict , __lowercase : int , __lowercase : Optional[Any]=None ) -> Any:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
_snake_case = nn.Parameter(__lowercase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
_snake_case = nn.Parameter(__lowercase )
def a_ ( __lowercase : Any , __lowercase : Dict , __lowercase : Union[str, Any] ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : Any ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
_snake_case = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : Dict , __lowercase : List[str] , __lowercase : Union[str, Any] ) -> Optional[Any]:
# layernorm 1
_snake_case = weights[0][0][0]
_snake_case = np.asarray(layer_norm_a[0] )
_snake_case = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# lsh weights + output
_snake_case = weights[0][1]
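    # LSH attention uses a shared query_key projection (3 weight tensors), while
    # local attention has separate query and key projections (4 weight tensors)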
if len(__lowercase ) < 4:
set_layer_weights_in_torch_lsh(__lowercase , torch_block.attention , __lowercase )
else:
set_layer_weights_in_torch_local(__lowercase , torch_block.attention , __lowercase )
# intermediate weighs
_snake_case = weights[2][0][1][2]
# Chunked Feed Forward
if len(__lowercase ) == 4:
_snake_case = intermediate_weights[2]
# layernorm 2
_snake_case = np.asarray(intermediate_weights[0][0] )
_snake_case = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# intermediate dense
_snake_case = np.asarray(intermediate_weights[1][0] )
_snake_case = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
# intermediate out
_snake_case = np.asarray(intermediate_weights[4][0] )
_snake_case = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Tuple , __lowercase : Tuple , __lowercase : Dict ) -> Optional[int]:
# reformer model
_snake_case = torch_model.reformer
# word embeds
_snake_case = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowercase ) , )
if isinstance(weights[3] , __lowercase ):
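        # axial position embeddings: weights[3] holds one factored embedding
        # matrix per axis, each copied into the corresponding torch parameter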
_snake_case = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_snake_case = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
_snake_case = nn.Parameter(torch.tensor(__lowercase ) )
_snake_case = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__lowercase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_snake_case = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__lowercase , __lowercase , __lowercase )
# output layer norm
_snake_case = np.asarray(weights[7][0] )
_snake_case = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# output embeddings
_snake_case = np.asarray(weights[9][0] )
_snake_case = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[Any] ) -> Optional[int]:
# Initialise PyTorch model
_snake_case = ReformerConfig.from_json_file(__lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
_snake_case = ReformerModelWithLMHead(__lowercase )
with open(__lowercase , 'rb' ) as f:
_snake_case = pickle.load(__lowercase )['weights']
set_model_weights_in_torch(__lowercase , __lowercase , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 282 | 1 |
from __future__ import annotations
import os
from typing import Any
import requests
_lowerCamelCase : Optional[int] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
_lowerCamelCase : str = BASE_URL + '''/user'''
# https://github.com/settings/tokens
_lowerCamelCase : List[Any] = os.environ.get('''USER_TOKEN''', '''''')
def a_ ( __lowercase : str ) -> dict[Any, Any]:
_snake_case = {
'Authorization': f'''token {auth_token}''',
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(__lowercase , headers=__lowercase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''') | 282 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a_ ( __lowercase : Dict ) -> List[Any]:
_snake_case = args.pruning_method
_snake_case = args.threshold
_snake_case = args.model_name_or_path.rstrip('/' )
_snake_case = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_snake_case = torch.load(os.path.join(__lowercase , 'pytorch_model.bin' ) )
_snake_case = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_snake_case = MagnitudeBinarizer.apply(inputs=__lowercase , threshold=__lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
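                # drop the trailing "weight" (6 characters) to look up the
                # layer's matching "<prefix>mask_scores" tensor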
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = TopKBinarizer.apply(__lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = ThresholdBinarizer.apply(__lowercase , __lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case , _snake_case = -0.1, 1.1
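                # hard-concrete (L0) mask: stretch sigmoid(scores) from (0, 1) to
                # (l, r) = (-0.1, 1.1), then clamp back into [0, 1] so extreme
                # scores saturate to exactly 0 or 1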
_snake_case = torch.sigmoid(__lowercase )
_snake_case = s * (r - l) + l
_snake_case = s_bar.clamp(min=0.0 , max=1.0 )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_snake_case = os.path.join(
os.path.dirname(__lowercase ) , f'''bertarized_{os.path.basename(__lowercase )}''' )
if not os.path.isdir(__lowercase ):
shutil.copytree(__lowercase , __lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(__lowercase , os.path.join(__lowercase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
        '''For `sigmoied_threshold`, it is the threshold \\tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
    help='''Folder where the pruned model will be saved (defaults to a sibling `bertarized_*` folder)''',
)
_lowerCamelCase : int = parser.parse_args()
main(args) | 282 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : int = KandinskyVaaControlnetImgaImgPipeline
_UpperCAmelCase : List[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCAmelCase : Any = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCAmelCase : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCAmelCase : int = False
@property
def A ( self : int ):
'''simple docstring'''
return 32
@property
def A ( self : int ):
'''simple docstring'''
return 32
@property
def A ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim
@property
def A ( self : Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A ( self : int ):
'''simple docstring'''
return 100
@property
def A ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = {
'in_channels': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_snake_case = UNetaDConditionModel(**lowercase )
return model
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def A ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.dummy_unet
_snake_case = self.dummy_movq
_snake_case = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.00085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_snake_case = DDIMScheduler(**lowercase )
_snake_case = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : List[Any]=0 ):
'''simple docstring'''
_snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase ) ).to(lowercase )
_snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase )
# create init_image
_snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase )
_snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_snake_case = Image.fromarray(np.uinta(lowercase ) ).convert('RGB' ).resize((256, 256) )
# create hint
_snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase )
if str(lowercase ).startswith('mps' ):
_snake_case = torch.manual_seed(lowercase )
else:
_snake_case = torch.Generator(device=lowercase ).manual_seed(lowercase )
_snake_case = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def A ( self : Any ):
'''simple docstring'''
_snake_case = 'cpu'
_snake_case = self.get_dummy_components()
_snake_case = self.pipeline_class(**lowercase )
_snake_case = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_snake_case = pipe(**self.get_dummy_inputs(lowercase ) )
_snake_case = output.images
_snake_case = pipe(
**self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0]
_snake_case = image[0, -3:, -3:, -1]
_snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_snake_case = init_image.resize((512, 512) )
_snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
_snake_case = torch.from_numpy(np.array(lowercase ) ).float() / 255.0
_snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_snake_case = 'A robot, 4k photo'
_snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase )
_snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
_snake_case = pipeline.to(lowercase )
pipeline.set_progress_bar_config(disable=lowercase )
_snake_case = torch.Generator(device='cpu' ).manual_seed(0 )
_snake_case , _snake_case = pipe_prior(
lowercase , image=lowercase , strength=0.85 , generator=lowercase , negative_prompt='' , ).to_tuple()
_snake_case = pipeline(
image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , hint=lowercase , generator=lowercase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
_snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase , lowercase ) | 282 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@property
def A ( self : List[str] ):
'''simple docstring'''
return self.get_dummy_input()
@property
def A ( self : Any ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def A ( self : Union[str, Any] , lowercase : Any=True , lowercase : List[Any]=False , lowercase : List[str]=False , lowercase : Dict=False , ):
'''simple docstring'''
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowercase )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowercase , generator=lowercase , device=lowercase )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowercase , device=lowercase )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowercase , generator=lowercase , device=lowercase ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowercase )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowercase , device=lowercase )
return dummy_input
def A ( self : Any ):
'''simple docstring'''
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Dict , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
unet_block.to(lowercase )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowercase ).to(lowercase )
assert torch_all_close(output_slice.flatten() , lowercase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def A ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
model.to(lowercase )
model.train()
_snake_case = model(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
_snake_case = torch.device(lowercase )
_snake_case = randn_tensor(output.shape , device=lowercase )
_snake_case = torch.nn.functional.mse_loss(lowercase , lowercase )
loss.backward() | 282 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = '''▁'''
_lowerCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Any = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
_lowerCamelCase : Optional[int] = {
'''google/pegasus-xsum''': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Any = PegasusTokenizer
_UpperCAmelCase : Dict = ["input_ids", "attention_mask"]
def __init__( self : Tuple , lowercase : str=None , lowercase : Any=None , lowercase : List[Any]="<pad>" , lowercase : List[Any]="</s>" , lowercase : Tuple="<unk>" , lowercase : Any="<mask_2>" , lowercase : List[str]="<mask_1>" , lowercase : List[Any]=None , lowercase : Dict=103 , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowercase , lowercase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowercase )}, but is'''
f''' {type(lowercase )}''' )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowercase ) , self.offset - 1 )
]
if len(set(lowercase ) ) != len(lowercase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowercase , tokenizer_file=lowercase , pad_token=lowercase , eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , **lowercase , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
def A ( self : List[str] , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def A ( self : List[Any] , lowercase : List , lowercase : Optional[List] = None , lowercase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowercase )
elif token_ids_a is None:
return self._special_token_mask(lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def A ( self : Any , lowercase : Tuple , lowercase : Any=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A ( self : int , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,) | 282 |
_lowerCamelCase : int = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
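# Doomsday dates of each month, reduced mod 7: the [4, 1, ...] list matches leap
# years (Jan 4, Feb 29 -> 1) and the [3, 7, ...] list common years (Jan 3, Feb 28 -> 7).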
_lowerCamelCase : List[str] = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def a_ ( __lowercase : int , __lowercase : int , __lowercase : int ) -> str:
assert len(str(__lowercase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
_snake_case = year // 100
_snake_case = (5 * (century % 4) + 2) % 7
_snake_case = year % 100
_snake_case = centurian % 12
_snake_case = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_snake_case = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_snake_case = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
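# Worked example: for get_week_day(2020, 10, 24) the century anchor is 2, the
# year's doomsday weekday is (1 + 8 + 2 + 2) % 7 = 6, and October's doomsday date
# mod 7 is 3, so the result is (6 + 24 - 3) % 7 = 6 -> 'Saturday'.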
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = ["pixel_values"]
def __init__( self : str , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : int = 0.9 , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = size if size is not None else {'shortest_edge': 224}
_snake_case = get_size_dict(lowercase , default_to_square=lowercase )
_snake_case = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_snake_case = get_size_dict(lowercase , param_name='crop_size' )
_snake_case = do_resize
_snake_case = size
_snake_case = crop_pct
_snake_case = resample
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_snake_case = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[float] = None , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
_snake_case = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
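        # with crop_pct set, resize to size / crop_pct first so that the later
        # center crop of `size` keeps the central crop_pct fraction of the image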
if crop_pct is not None:
if "shortest_edge" in size:
_snake_case = int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
_snake_case = int(size['height'] / crop_pct )
else:
_snake_case = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(lowercase ) )
_snake_case = get_resize_output_image_size(lowercase , size=lowercase , default_to_square=lowercase )
else:
if "shortest_edge" in size:
_snake_case = get_resize_output_image_size(lowercase , size=size['shortest_edge'] , default_to_square=lowercase )
elif "height" in size and "width" in size:
_snake_case = (size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(lowercase ) )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def A ( self : int , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
_snake_case = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowercase , size=(size['height'], size['width']) , data_format=lowercase , **lowercase )
def A ( self : str , lowercase : np.ndarray , lowercase : Union[int, float] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Optional[int] , ):
'''simple docstring'''
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def A ( self : List[str] , lowercase : ImageInput , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : int = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : bool = None , lowercase : float = None , lowercase : bool = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : ChannelDimension = ChannelDimension.FIRST , **lowercase : Optional[int] , ):
'''simple docstring'''
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = crop_pct if crop_pct is not None else self.crop_pct
_snake_case = resample if resample is not None else self.resample
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(lowercase , default_to_square=lowercase )
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(lowercase , param_name='crop_size' )
_snake_case = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(lowercase ) for image in images]
if do_resize:
_snake_case = [self.resize(image=lowercase , size=lowercase , crop_pct=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
_snake_case = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
_snake_case = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
_snake_case = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
_snake_case = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_snake_case = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 282 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Union[str, Any] , lowercase : Optional[int]=32 ):
'''simple docstring'''
set_seed(0 )
_snake_case = UNetaDModel(sample_size=lowercase , in_channels=3 , out_channels=3 )
_snake_case = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_snake_case = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
_snake_case = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
_snake_case = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randn((4, 3, 32, 32) ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randint(0 , 1_000 , (4,) ).long().to(lowercase ) for _ in range(4 )]
# train with a DDPM scheduler
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
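        # with identical seeds, data and schedule settings, DDPM.add_noise and
        # DDIM.add_noise should yield the same noisy samples, hence equal losses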
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) ) | 282 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : jnp.ndarray
@flax_register_to_config
class SCREAMING_SNAKE_CASE__ ( nn.Module ,UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = 3_2
_UpperCAmelCase : int = 4
_UpperCAmelCase : int = 4
_UpperCAmelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCAmelCase : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_UpperCAmelCase : Union[bool, Tuple[bool]] = False
_UpperCAmelCase : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_UpperCAmelCase : int = 2
_UpperCAmelCase : Union[int, Tuple[int]] = 8
_UpperCAmelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCAmelCase : int = 1_2_8_0
_UpperCAmelCase : float = 0.0
_UpperCAmelCase : bool = False
_UpperCAmelCase : jnp.dtype = jnp.floataa
_UpperCAmelCase : bool = True
_UpperCAmelCase : int = 0
_UpperCAmelCase : bool = False
def A ( self : List[Any] , lowercase : jax.random.KeyArray ):
'''simple docstring'''
_snake_case = (1, self.in_channels, self.sample_size, self.sample_size)
_snake_case = jnp.zeros(lowercase , dtype=jnp.floataa )
_snake_case = jnp.ones((1,) , dtype=jnp.intaa )
_snake_case = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_snake_case , _snake_case = jax.random.split(lowercase )
_snake_case = {'params': params_rng, 'dropout': dropout_rng}
return self.init(lowercase , lowercase , lowercase , lowercase )["params"]
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.block_out_channels
_snake_case = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_snake_case = self.num_attention_heads or self.attention_head_dim
# input
_snake_case = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_snake_case = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_snake_case = FlaxTimestepEmbedding(lowercase , dtype=self.dtype )
_snake_case = self.only_cross_attention
if isinstance(lowercase , lowercase ):
_snake_case = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase , lowercase ):
_snake_case = (num_attention_heads,) * len(self.down_block_types )
# down
_snake_case = []
_snake_case = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_snake_case = output_channel
_snake_case = block_out_channels[i]
_snake_case = i == len(lowercase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_snake_case = FlaxCrossAttnDownBlockaD(
in_channels=lowercase , out_channels=lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_snake_case = FlaxDownBlockaD(
in_channels=lowercase , out_channels=lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowercase )
_snake_case = down_blocks
# mid
_snake_case = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_snake_case = []
_snake_case = list(reversed(lowercase ) )
_snake_case = list(reversed(lowercase ) )
_snake_case = list(reversed(lowercase ) )
_snake_case = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_snake_case = output_channel
_snake_case = reversed_block_out_channels[i]
_snake_case = reversed_block_out_channels[min(i + 1 , len(lowercase ) - 1 )]
_snake_case = i == len(lowercase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_snake_case = FlaxCrossAttnUpBlockaD(
in_channels=lowercase , out_channels=lowercase , prev_output_channel=lowercase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_snake_case = FlaxUpBlockaD(
in_channels=lowercase , out_channels=lowercase , prev_output_channel=lowercase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowercase )
_snake_case = output_channel
_snake_case = up_blocks
# out
_snake_case = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
_snake_case = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[Any] , lowercase : List[Any] , lowercase : str , lowercase : Any , lowercase : Any=None , lowercase : Tuple=None , lowercase : bool = True , lowercase : bool = False , ):
'''simple docstring'''
if not isinstance(lowercase , jnp.ndarray ):
_snake_case = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowercase , jnp.ndarray ) and len(timesteps.shape ) == 0:
_snake_case = timesteps.astype(dtype=jnp.floataa )
_snake_case = jnp.expand_dims(lowercase , 0 )
_snake_case = self.time_proj(lowercase )
_snake_case = self.time_embedding(lowercase )
# 2. pre-process
_snake_case = jnp.transpose(lowercase , (0, 2, 3, 1) )
_snake_case = self.conv_in(lowercase )
# 3. down
_snake_case = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase , lowercase ):
_snake_case , _snake_case = down_block(lowercase , lowercase , lowercase , deterministic=not train )
else:
_snake_case , _snake_case = down_block(lowercase , lowercase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_snake_case = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowercase , lowercase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_snake_case = new_down_block_res_samples
# 4. mid
_snake_case = self.mid_block(lowercase , lowercase , lowercase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_snake_case = down_block_res_samples[-(self.layers_per_block + 1) :]
_snake_case = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowercase , lowercase ):
_snake_case = up_block(
lowercase , temb=lowercase , encoder_hidden_states=lowercase , res_hidden_states_tuple=lowercase , deterministic=not train , )
else:
_snake_case = up_block(lowercase , temb=lowercase , res_hidden_states_tuple=lowercase , deterministic=not train )
# 6. post-process
_snake_case = self.conv_norm_out(lowercase )
_snake_case = nn.silu(lowercase )
_snake_case = self.conv_out(lowercase )
_snake_case = jnp.transpose(lowercase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowercase ) | 282 |
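# A hedged usage sketch for the conditional UNet above, via the public
# FlaxUNet2DConditionModel from diffusers; the tiny config values are illustrative
# assumptions chosen only to keep initialization fast:
import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

unet = FlaxUNet2DConditionModel(
    sample_size=8,
    in_channels=4,
    out_channels=4,
    block_out_channels=(32, 64, 64, 64),
    cross_attention_dim=32,
    attention_head_dim=8,
)
params = unet.init_weights(jax.random.PRNGKey(0))
sample = jnp.zeros((1, 4, 8, 8), dtype=jnp.float32)   # NCHW; transposed to NHWC internally
timesteps = jnp.ones((1,), dtype=jnp.int32)
context = jnp.zeros((1, 77, 32), dtype=jnp.float32)   # encoder hidden states
out = unet.apply({"params": params}, sample, timesteps, context)
print(out.sample.shape)  # (1, 4, 8, 8)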
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid element-wise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
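# As written, np.exp(-vector) overflows for large negative inputs; a numerically
# stable variant (an illustrative alternative, not part of the original) only
# evaluates exp on non-positive arguments:
import numpy as np

def stable_sigmoid(vector: np.ndarray) -> np.ndarray:
    out = np.empty_like(vector, dtype=np.float64)
    pos = vector >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-vector[pos]))
    exp_v = np.exp(vector[~pos])          # safe: arguments here are negative
    out[~pos] = exp_v / (1.0 + exp_v)
    return out

print(stable_sigmoid(np.array([-1000.0, 0.0, 1000.0])))  # [0.  0.5 1. ]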
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Union[str, Any] , lowercase : Optional[int]=32 ):
'''simple docstring'''
set_seed(0 )
_snake_case = UNetaDModel(sample_size=lowercase , in_channels=3 , out_channels=3 )
_snake_case = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_snake_case = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
_snake_case = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
_snake_case = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randn((4, 3, 32, 32) ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randint(0 , 1_000 , (4,) ).long().to(lowercase ) for _ in range(4 )]
# train with a DDPM scheduler
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) ) | 282 |
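# Both schedulers above share the same closed-form forward process,
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so with identical betas their add_noise outputs match exactly, which is what
# makes the DDPM and DDIM training loops comparable. A small check (assuming
# diffusers is installed):
import torch
from diffusers import DDIMScheduler, DDPMScheduler

kwargs = dict(num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
ddpm, ddim = DDPMScheduler(**kwargs), DDIMScheduler(**kwargs)
x0, eps = torch.randn(2, 3, 32, 32), torch.randn(2, 3, 32, 32)
t = torch.tensor([10, 500])
assert torch.allclose(ddpm.add_noise(x0, eps, t), ddim.add_noise(x0, eps, t))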
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : int ):
'''simple docstring'''
_snake_case = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_snake_case = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_snake_case = 'The dog is cute and lives in the garden house'
_snake_case = jnp.array([tokenizer.encode(lowercase )] )
_snake_case = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_snake_case = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
_snake_case = model(lowercase )['last_hidden_state']
self.assertEqual(output.shape , lowercase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , lowercase , atol=1E-3 ) ) | 282 | 1 |
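# Hard-coded expected slices like the one above are typically generated once from a
# reference run of the PyTorch checkpoint; a hedged sketch of reproducing them:
import torch
from transformers import AutoTokenizer, XLMRobertaModel

pt_model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
pt_tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
inputs = pt_tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
with torch.no_grad():
    last_hidden = pt_model(**inputs).last_hidden_state  # (1, 12, 768)
print(last_hidden[:, :, -1])  # source of the expected last-dim slice in the Flax test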
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents['title'], documents['text']):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '')
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents['title'], documents['text'], truncation=True, padding='longest', return_tensors='pt')['input_ids']
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def a_ ( __lowercase : "RagExampleArguments" , __lowercase : "ProcessingArguments" , __lowercase : "IndexHnswArguments" , ) -> List[Any]:
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        'csv', data_files=[rag_example_args.csv_path], split='train', delimiter='\t', column_names=['title', 'text'])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {'text': Value('string'), 'title': Value('string'), 'embeddings': Sequence(Value('float32'))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset')
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index('embeddings', custom_index=index)
# And save the index
    index_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset_hnsw_index.faiss')
    dataset.get_index('embeddings').save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
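    # Note: d=768 matches the DPR context encoder's hidden size, METRIC_INNER_PRODUCT
    # matches DPR's dot-product similarity, and m (the number of HNSW links per node)
    # trades index size and build time for recall.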
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args) | 282 |
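# Hedged end-to-end usage for the script above (the filename is an assumption;
# HfArgumentParser exposes every dataclass field as a --flag of the same name):
#
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_knowledge_dataset.csv \
#       --output_dir path/to/my_knowledge_dataset
#
# Once built, the passages and index can be reloaded and queried with a DPR question
# encoder; a sketch, where the paths follow the defaults above and the question-encoder
# checkpoint is an illustrative choice, not part of the script:
import torch
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

dataset = load_from_disk("test_run/dummy-kb/my_knowledge_dataset")
dataset.load_faiss_index("embeddings", "test_run/dummy-kb/my_knowledge_dataset_hnsw_index.faiss")
q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")
with torch.no_grad():
    q_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt")).pooler_output
scores, retrieved = dataset.get_nearest_examples("embeddings", q_emb[0].numpy(), k=3)
print(list(zip(scores, retrieved["title"])))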
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = '''▁'''
_lowerCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Any = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
_lowerCamelCase : Optional[int] = {
'''google/pegasus-xsum''': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Any = PegasusTokenizer
_UpperCAmelCase : Dict = ["input_ids", "attention_mask"]
def __init__( self : Tuple , lowercase : str=None , lowercase : Any=None , lowercase : List[Any]="<pad>" , lowercase : List[Any]="</s>" , lowercase : Tuple="<unk>" , lowercase : Any="<mask_2>" , lowercase : List[str]="<mask_1>" , lowercase : List[Any]=None , lowercase : Dict=103 , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowercase , lowercase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowercase )}, but is'''
f''' {type(lowercase )}''' )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowercase ) , self.offset - 1 )
]
if len(set(lowercase ) ) != len(lowercase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowercase , tokenizer_file=lowercase , pad_token=lowercase , eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , **lowercase , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
def A ( self : List[str] , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def A ( self : List[Any] , lowercase : List , lowercase : Optional[List] = None , lowercase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowercase )
elif token_ids_a is None:
return self._special_token_mask(lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def A ( self : Any , lowercase : Tuple , lowercase : Any=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A ( self : int , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,) | 282 | 1 |
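# A hedged usage sketch for the fast tokenizer above: the low ids below `offset`
# (103 by default) are reserved for <pad>, </s>, the mask tokens and the
# <unk_2>...<unk_102> fillers, and build_inputs_with_special_tokens only appends </s>:
from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tok("PEGASUS pretrains by generating gap sentences.")["input_ids"]
assert ids[-1] == tok.eos_token_id  # eos appended, no bos
print(tok.get_special_tokens_mask(ids, already_has_special_tokens=True))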
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : int , *__UpperCAmelCase : Any , **__UpperCAmelCase : Optional[int] ) ->None:
"""simple docstring"""
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 0 |
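# The shim above only warns and delegates, so new code should construct the processor
# class directly; a minimal sketch (the checkpoint name is an illustrative assumption):
from transformers import PoolFormerImageProcessor

image_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
# PoolFormerFeatureExtractor.from_pretrained(...) returns the equivalent object after
# emitting the deprecation warning defined above.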
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowerCamelCase : Optional[int] = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x)) | 282 | 0 |
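# Worked check of the two evaluators above at the demo point
# (poly = (0.0, 0.0, 5.0, 9.3, 7.0), x = 10.0):
#   naive sum: 5*10**2 + 9.3*10**3 + 7*10**4 = 500 + 9300 + 70000 = 79800
#   Horner:    ((((7*10) + 9.3)*10 + 5)*10 + 0)*10 + 0          = 79800
# Horner needs only n multiplications and additions, avoiding repeated x**i.
assert abs(evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) - 79800.0) < 1e-6
assert abs(horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) - 79800.0) < 1e-6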