code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = GPTSanJapaneseTokenizer
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = {"""do_clean_text""": False, """add_prefix_space""": False}
def A ( self : Dict ):
"""simple docstring"""
super().setUp()
# fmt: off
__snake_case = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
__snake_case = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
__snake_case = {"unk_token": "<unk>"}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(a_ ) )
def A ( self : Any , **a_ : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **a_ )
def A ( self : Tuple , a_ : List[str] ):
"""simple docstring"""
__snake_case = "こんにちは、世界。 \nこんばんは、㔺界。😀"
__snake_case = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def A ( self : str , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.get_input_output_texts(a_ )
__snake_case = tokenizer.encode(a_ , add_special_tokens=a_ )
__snake_case = tokenizer.decode(a_ , clean_up_tokenization_spaces=a_ )
return text, ids
def A ( self : Optional[Any] ):
"""simple docstring"""
pass # TODO add if relevant
def A ( self : int ):
"""simple docstring"""
pass # TODO add if relevant
def A ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.get_tokenizer()
# Testing tokenization
__snake_case = "こんにちは、世界。 こんばんは、㔺界。"
__snake_case = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
__snake_case = tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids without special tokens
__snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__snake_case = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids with special tokens
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__snake_case = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(a_ , a_ )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.get_tokenizer()
# Testing tokenization
__snake_case = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
__snake_case = "こんにちは、、、、世界。こんばんは、、、、世界。"
__snake_case = tokenizer.encode(a_ )
__snake_case = tokenizer.decode(a_ )
self.assertEqual(a_ , a_ )
@slow
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
__snake_case = "こんにちは、世界。"
__snake_case = "こんばんは、㔺界。😀"
__snake_case = "こんにちは、世界。こんばんは、世界。😀"
__snake_case = tokenizer.encode(prefix_text + input_text )
__snake_case = tokenizer.encode("" , prefix_text=prefix_text + input_text )
__snake_case = tokenizer.encode(a_ , prefix_text=a_ )
__snake_case = tokenizer.decode(a_ )
__snake_case = tokenizer.decode(a_ )
__snake_case = tokenizer.decode(a_ )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , a_ )
@slow
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
__snake_case = "こんにちは、世界。"
__snake_case = "こんばんは、㔺界。😀"
__snake_case = len(tokenizer.encode(a_ ) ) - 2
__snake_case = len(tokenizer.encode(a_ ) ) - 2
__snake_case = [1] + [0] * (len_prefix + len_text + 1)
__snake_case = [1] * (len_prefix + len_text + 1) + [0]
__snake_case = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__snake_case = tokenizer(prefix_text + input_text ).token_type_ids
__snake_case = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
__snake_case = tokenizer(a_ , prefix_text=a_ ).token_type_ids
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , a_ )
@slow
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
__snake_case = tokenizer.encode("あンいワ" )
__snake_case = tokenizer.encode("" , prefix_text="あンいワ" )
__snake_case = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(a_ ) , tokenizer.decode(a_ ) )
self.assertEqual(tokenizer.decode(a_ ) , tokenizer.decode(a_ ) )
self.assertNotEqual(a_ , a_ )
self.assertNotEqual(a_ , a_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A ( self : str ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
__snake_case = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
__snake_case = tokenizer(a_ , padding=a_ )
__snake_case = tokenizer.batch_encode_plus(a_ , padding=a_ )
# fmt: off
__snake_case = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
__snake_case = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__snake_case = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , a_ )
self.assertListEqual(x_token.token_type_ids , a_ )
self.assertListEqual(x_token.attention_mask , a_ )
self.assertListEqual(x_token_a.input_ids , a_ )
self.assertListEqual(x_token_a.token_type_ids , a_ )
self.assertListEqual(x_token_a.attention_mask , a_ )
def A ( self : List[str] ):
"""simple docstring"""
pass
def A ( self : int ):
"""simple docstring"""
pass
| 69 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
lowerCAmelCase = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None) | 174 | 0 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
# Whether the optional ``s3fs`` dependency is available.  The dump collapsed
# the intended name ``_has_safs`` into ``__A``, leaving the guard below with a
# NameError; restore the intended binding.
_has_safs = importlib.util.find_spec('''s3fs''') is not None

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

# Compression filesystems bundled with the library.  The registration loop
# below reads ``COMPRESSION_FILESYSTEMS``, but the dump collapsed that name
# into ``__A``; restore it so the loop resolves.
COMPRESSION_FILESYSTEMS = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    # Warn when another implementation already claimed this protocol -- we
    # deliberately clobber it below.
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase_ ( _lowerCamelCase: str ) -> str:
    """Strip a leading protocol prefix (e.g. ``s3://``) from a dataset path.

    Args:
        _lowerCamelCase: a dataset path, optionally prefixed with ``<protocol>://``.

    Returns:
        The path with the protocol prefix removed; a path without a ``://``
        separator is returned unchanged.
    """
    if "://" in _lowerCamelCase:
        # The dump discarded this result into a dead local (and read an
        # undefined ``dataset_path``); rebind the parameter so the stripped
        # path is actually returned.
        _lowerCamelCase = _lowerCamelCase.split("://")[1]
    return _lowerCamelCase
def lowercase_ ( _lowerCamelCase: fsspec.AbstractFileSystem ) -> bool:
    """Return True when the given filesystem is remote.

    A filesystem is considered remote when it is not ``None`` and its protocol
    is anything other than the local ``file`` protocol.
    """
    # The dump's body referenced an undefined name ``fs``; use the parameter.
    if _lowerCamelCase is not None and _lowerCamelCase.protocol != "file":
        return True
    else:
        return False
def lowercase_ ( fs: fsspec.AbstractFileSystem , src: str , dst: str ) -> str:
    """Move ``src`` to ``dst`` on the filesystem ``fs``.

    The dump collapsed all three parameter names into one duplicate name
    (a SyntaxError); they are restored here as ``fs``/``src``/``dst``.
    """
    # Inlined local/remote check: the dump's body called
    # ``is_remote_filesystem``, which is not bound under that name in this
    # module as written.
    is_local = fs is None or fs.protocol == "file"
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        # NOTE(review): the dump mangled the ``recursive`` argument; upstream
        # passes recursive=True -- confirm against the original module.
        fs.mv(src, dst, recursive=True)
def lowercase_ ( ) -> None:
    """Reset fsspec's internal async machinery (io thread, loop and lock).

    Needed e.g. after forking, so that child processes do not inherit a
    lock held by the parent.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        # The dump discarded these three resets into dead locals; restore the
        # upstream fallback of clearing the cached io thread / event loop and
        # replacing the module-level lock.  NOTE(review): confirm the exact
        # attribute names against the fsspec version in use.
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
from __future__ import annotations
def lowercase_ ( _lowerCamelCase: list[list[int]] ) -> bool:
    """Solve the square maze ``_lowerCamelCase`` and print the solution path.

    Cells with value 0 are free, 1 are walls.  Returns True when a path from
    the top-left to the bottom-right corner exists.
    """
    size = len(_lowerCamelCase)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    # NOTE(review): ``run_maze`` is not bound under that name in this dump
    # (the backtracking helper's definition was renamed) -- confirm the
    # helper name against the original module.
    solved = run_maze(_lowerCamelCase, 0, 0, solutions)
    if solved:
        # Print one solution row per line (the dump printed the maze argument
        # once per row instead of the row itself).
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def lowercase_ ( maze: list[list[int]] , i: int , j: int , solutions: list[list[int]] ) -> bool:
    """Backtracking step of the maze solver.

    Marks visited cells with 1 in ``solutions`` and unmarks them on
    backtracking; returns True once the bottom-right corner is reached.
    The dump collapsed all four parameter names into one duplicate name
    (a SyntaxError); they are restored here.
    """
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited (the dump discarded this write into a dead local)
            solutions[i][j] = 1
            # check for directions; recursion targets this function's actual
            # bound name, since ``run_maze`` is undefined in this dump
            if (
                lowercase_(maze, i + 1, j, solutions)
                or lowercase_(maze, i, j + 1, solutions)
                or lowercase_(maze, i - 1, j, solutions)
                or lowercase_(maze, i, j - 1, solutions)
            ):
                return True
            # Backtrack: unmark the cell when no direction reaches the goal.
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 366 | 0 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list[int] ) -> int:
    """Return the minimum possible difference between the sums of two subsets
    that partition ``snake_case__`` (classic subset-sum dynamic programming).

    ``dp[i][j]`` is True when some subset of the first ``i`` items sums to
    ``j``.  The best split puts the largest achievable sum <= total/2 on one
    side, so the answer is ``total - 2 * best``.
    """
    n = len(snake_case__)
    s = sum(snake_case__)
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    # A sum of 0 is always achievable (empty subset) -- the dump discarded
    # these table writes into dead locals.
    for i in range(n + 1):
        dp[i][0] = True
    # With zero items, no positive sum is achievable.
    for j in range(1, s + 1):
        dp[0][j] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Either skip item i-1 (the dump read dp[i][j-1] here, which is
            # the wrong cell for the skip case) ...
            dp[i][j] = dp[i - 1][j]
            # ... or take it when it fits.
            if snake_case__[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - snake_case__[i - 1]]
    # Scan down from s/2 for the largest achievable one-side sum; default to s
    # so ``diff`` is never unbound (dp[n][0] is True, so the loop always hits).
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
from manim import *
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait() | 67 | 1 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCAmelCase__ = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = "cpu"
lowerCAmelCase__ = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
lowerCAmelCase__ = "path-to-your-trained-model"
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCAmelCase__ = pipe.to(device)
# to channels last
lowerCAmelCase__ = pipe.unet.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.vae.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCAmelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCAmelCase__ = torch.randn(2, 4, 64, 64)
lowerCAmelCase__ = torch.rand(1) * 999
lowerCAmelCase__ = torch.randn(2, 77, 768)
lowerCAmelCase__ = (sample, timestep, encoder_hidden_status)
try:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowerCAmelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCAmelCase__ = 666
lowerCAmelCase__ = torch.Generator(device).manual_seed(seed)
lowerCAmelCase__ = {"generator": generator}
if args.steps is not None:
lowerCAmelCase__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowerCAmelCase__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 707 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase__ = ['''gpt2''']
lowerCAmelCase__ = '''gpt2'''
if is_tf_available():
class lowercase_ (tf.Module ):
"""simple docstring"""
def __init__( self : List[str] ,lowercase__ : Tuple ):
super().__init__()
__lowercase = tokenizer
__lowercase = AutoConfig.from_pretrained(lowercase__ )
__lowercase = TFGPTaLMHeadModel.from_config(lowercase__ )
@tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name='''text''' ),) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ):
__lowercase = self.tokenizer(lowercase__ )
__lowercase = tokenized['''input_ids'''].to_tensor()
__lowercase = tf.cast(input_ids_dense > 0 ,tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__lowercase = self.model(input_ids=lowercase__ ,attention_mask=lowercase__ )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
super().setUp()
__lowercase = [GPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__lowercase = [TFGPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowercase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowercase = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowercase = tokenizer([test_inputs] ,return_tensors='''tf''' )
__lowercase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowercase = python_outputs[key].numpy()
__lowercase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowercase__ ,tf.intaa ) == tf_outputs_values ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = tf.function(lowercase__ )
for test_inputs in self.test_sentences:
__lowercase = tf.constant(lowercase__ )
__lowercase = compiled_tokenizer(lowercase__ )
__lowercase = tf_tokenizer(lowercase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = ModelToSave(tokenizer=lowercase__ )
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = model.serving(lowercase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowercase = Path(lowercase__ ) / '''saved.model'''
tf.saved_model.save(lowercase__ ,lowercase__ ,signatures={'''serving_default''': model.serving} )
__lowercase = tf.saved_model.load(lowercase__ )
__lowercase = loaded_model.signatures['''serving_default'''](lowercase__ )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = tf_tokenizer(lowercase__ ) # Build model with some sample inputs
__lowercase = tf_tokenizer.get_config()
__lowercase = TFGPTaTokenizer.from_config(lowercase__ )
__lowercase = model_from_config(lowercase__ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowercase = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = tf_tokenizer(lowercase__ ,max_length=lowercase__ )
__lowercase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 624 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : int = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
lowerCamelCase__ : Dict = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
lowerCamelCase__ : str = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : List[str] = 'whisper'
__lowerCAmelCase : str = ['past_key_values']
__lowerCAmelCase : Optional[int] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , SCREAMING_SNAKE_CASE_=5_18_65 , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=15_36 , SCREAMING_SNAKE_CASE_=15_36 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=5_02_57 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=15_00 , SCREAMING_SNAKE_CASE_=4_48 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=[2_20, 5_02_56] , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.0_5 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=7 , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : str = vocab_size
lowercase__ : Tuple = num_mel_bins
lowercase__ : int = d_model
lowercase__ : str = encoder_layers
lowercase__ : str = encoder_attention_heads
lowercase__ : Any = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : List[str] = decoder_ffn_dim
lowercase__ : Dict = encoder_ffn_dim
lowercase__ : Dict = dropout
lowercase__ : Dict = attention_dropout
lowercase__ : List[str] = activation_dropout
lowercase__ : List[str] = activation_function
lowercase__ : Dict = init_std
lowercase__ : int = encoder_layerdrop
lowercase__ : List[str] = decoder_layerdrop
lowercase__ : List[str] = use_cache
lowercase__ : Tuple = encoder_layers
lowercase__ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ : int = max_source_positions
lowercase__ : Tuple = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
lowercase__ : Tuple = classifier_proj_size
lowercase__ : Optional[int] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : Optional[int] = apply_spec_augment
lowercase__ : Any = mask_time_prob
lowercase__ : str = mask_time_length
lowercase__ : int = mask_time_min_masks
lowercase__ : List[Any] = mask_feature_prob
lowercase__ : List[Any] = mask_feature_length
lowercase__ : Optional[int] = mask_feature_min_masks
lowercase__ : Dict = median_filter_width
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
class _snake_case(UpperCAmelCase_):
    """ONNX export configuration for a speech encoder-decoder model.

    NOTE(review): the obfuscated source gave all three members the same name
    (each ``def`` shadowed the previous one) and rebound every local to one
    identifier, so ``common_inputs``/``dummy_inputs`` were undefined; the
    member names are restored from the calls the bodies themselves make
    (``super().generate_dummy_inputs`` implies an override of that name).
    """

    @property
    def inputs(self):
        """Axis layout of the exported model's inputs."""
        # Encoder input: audio features with shape (batch, feature_size, encoder_sequence).
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            # With cached past key/values only the newest decoder token is fed.
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        sampling_rate=22050,
        time_duration=5.0,
        frequency=220,
    ):
        """Build dummy encoder audio features plus decoder token ids for tracing.

        ``preprocessor`` is a processor exposing both ``feature_extractor`` and
        ``tokenizer``; the remaining arguments parameterize the synthetic audio.
        Returns an OrderedDict with ``input_features``, ``decoder_input_ids``
        and, when past key/values are used, ``past_key_values``.
        """
        dummy_inputs = OrderedDict()
        # Audio side: delegate to the base OnnxConfig with the feature extractor.
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # The encoder halves the time axis; with past KV only new tokens matter.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        # Text side: delegate to the seq2seq base with the tokenizer.
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self):
        """Absolute tolerance when validating the exported model."""
        return 1e-3
| 12 |
import inspect
import unittest
class lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self :int ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
lowercase__ = inspect.getmembers(_lowercase , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
lowercase__ = "k-diffusion"
elif backend == "invisible_watermark":
lowercase__ = "invisible-watermark"
assert backend in deps, f'''{backend} is not in the deps table!'''
| 655 | 0 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
) | 193 |
from collections.abc import Iterable
from typing import Any
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Tuple , a : int | None = None ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = value
SCREAMING_SNAKE_CASE : Node | None = None # Added in order to delete a node easier
SCREAMING_SNAKE_CASE : Node | None = None
SCREAMING_SNAKE_CASE : Node | None = None
def __repr__( self : Optional[Any] ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"{self.value}": (self.left, self.right)} , indent=1 )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[int] , a : Node | None = None ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = root
def __str__( self : Any ) -> str:
"""simple docstring"""
return str(self.root )
def __UpperCamelCase ( self : List[str] , a : Node , a : Node | None ) -> None:
"""simple docstring"""
if new_children is not None: # reset its kids
SCREAMING_SNAKE_CASE : Dict = node.parent
if node.parent is not None: # reset its parent
if self.is_right(a ): # If it is the right children
SCREAMING_SNAKE_CASE : Dict = new_children
else:
SCREAMING_SNAKE_CASE : Any = new_children
else:
SCREAMING_SNAKE_CASE : List[Any] = new_children
def __UpperCamelCase ( self : Optional[Any] , a : Node ) -> bool:
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __UpperCamelCase ( self : Union[str, Any] ) -> bool:
"""simple docstring"""
return self.root is None
def __UpperCamelCase ( self : List[str] , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = Node(a ) # create a new Node
if self.empty(): # if Tree is empty
SCREAMING_SNAKE_CASE : Tuple = new_node # set its root
else: # Tree is not empty
SCREAMING_SNAKE_CASE : Optional[int] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
SCREAMING_SNAKE_CASE : Optional[Any] = new_node # We insert the new node in a leaf
break
else:
SCREAMING_SNAKE_CASE : Any = parent_node.left
else:
if parent_node.right is None:
SCREAMING_SNAKE_CASE : str = new_node
break
else:
SCREAMING_SNAKE_CASE : Optional[int] = parent_node.right
SCREAMING_SNAKE_CASE : int = parent_node
def __UpperCamelCase ( self : Any , *a : str ) -> None:
"""simple docstring"""
for value in values:
self.__insert(a )
def __UpperCamelCase ( self : Tuple , a : List[Any] ) -> Node | None:
"""simple docstring"""
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
SCREAMING_SNAKE_CASE : str = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
SCREAMING_SNAKE_CASE : Optional[int] = node.left if value < node.value else node.right
return node
def __UpperCamelCase ( self : int , a : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
if self.root is None:
return None
SCREAMING_SNAKE_CASE : Tuple = self.root
if not self.empty():
while node.right is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = node.right
return node
def __UpperCamelCase ( self : int , a : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
SCREAMING_SNAKE_CASE : List[Any] = self.root
if self.root is None:
return None
if not self.empty():
SCREAMING_SNAKE_CASE : Optional[Any] = self.root
while node.left is not None:
SCREAMING_SNAKE_CASE : str = node.left
return node
def __UpperCamelCase ( self : Dict , a : int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.search(a ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(a , a )
elif node.left is None: # Has only right children
self.__reassign_nodes(a , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(a , node.left )
else:
SCREAMING_SNAKE_CASE : Tuple = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
SCREAMING_SNAKE_CASE : str = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __UpperCamelCase ( self : List[Any] , a : Node | None ) -> Iterable:
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __UpperCamelCase ( self : str , a : Union[str, Any]=None ) -> Any:
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __UpperCamelCase ( self : Optional[Any] , a : list , a : Node | None ) -> None:
"""simple docstring"""
if node:
self.inorder(a , node.left )
arr.append(node.value )
self.inorder(a , node.right )
def __UpperCamelCase ( self : int , a : int , a : Node ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[int] = []
self.inorder(a , a ) # append all values to list using inorder traversal
return arr[k - 1]
def postorder(curr_node):
    """Return the nodes of the subtree rooted at `curr_node` in post-order.

    NOTE(review): renamed from ``lowerCamelCase__`` — the body already recursed
    via ``postorder``, and the demo function below reused the old name, which
    shadowed this one. The parameter was ``_a`` but the body read ``curr_node``.
    """
    node_list = []
    if curr_node is not None:
        # left subtree, then right subtree, then the node itself
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def lowerCamelCase__():
    """Exercise the binary search tree: build, search, min/max, then delete all.

    NOTE(review): the original instantiated an undefined ``BinarySearchTree``
    and passed an undefined ``_a`` to ``insert``/``print``; fixed to use the
    tree class defined above and the loop variable.
    """
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = _UpperCamelCase()  # the binary-search-tree class defined above
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
    import doctest

    # Run this module's doctests with per-test output when executed as a script.
    doctest.testmod(verbose=True)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-module bootstrap for the Graphormer sub-package.
# NOTE(review): the import-structure dict was bound to `lowercase_` while the
# bottom line referenced `_import_structure` (NameError); the torch-only list
# also overwrote the dict instead of being added under "modeling_graphormer",
# and the `_LazyModule` was never installed into `sys.modules`.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling objects as well.
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a proxy that imports lazily.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 154 |
"""simple docstring"""
def lowerCAmelCase_(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count distinct paths from (row, col) to the bottom-right cell of `grid`.

    Moves in the four cardinal directions, never revisits a cell on the current
    path, and treats cells equal to 1 as blocked. `visit` carries the cells on
    the current path (pass an empty set at the top-level call).

    NOTE(review): the obfuscated signature declared four parameters with the
    same name (a SyntaxError) and the body read undefined names; parameter
    names are restored from the body's own references.
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or blocked: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    # Reached the target cell: exactly one path.
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))
    count = 0
    # Explore the four neighbours; recursion is via this function itself.
    count += lowerCAmelCase_(grid, row + 1, col, visit)
    count += lowerCAmelCase_(grid, row - 1, col, visit)
    count += lowerCAmelCase_(grid, row, col + 1, visit)
    count += lowerCAmelCase_(grid, row, col - 1, visit)
    # Backtrack so sibling branches may reuse this cell.
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    import doctest

    # Run this module's doctests when executed as a script.
    doctest.testmod()
def A_(snake_case: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first `snake_case` natural numbers.

    NOTE(review): the body read an undefined ``n``; it now uses the parameter.
    """
    # Square of the sum, via the closed form (n(n+1)/2)^2.
    square_of_sum = (snake_case * (snake_case + 1) // 2) ** 2
    # Sum of squares, via the closed form n(n+1)(2n+1)/6.
    sum_of_squares = snake_case * (snake_case + 1) * (2 * snake_case + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
    # NOTE(review): this printed `solution()`, a name this module never
    # defines; the solver above is `A_`.
    print(F"{A_() = }")
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class SCREAMING_SNAKE_CASE__(FlaxModelTesterMixin, unittest.TestCase):
    """Flax AutoencoderKL model tests.

    NOTE(review): the base class was an undefined ``SCREAMING_SNAKE_CASE_``
    (restored to the imported ``FlaxModelTesterMixin``), both methods were
    named ``A__`` (the second shadowed the property), and every local was
    bound to one name. The property is renamed ``dummy_input`` because the
    second method reads ``self.dummy_input``; a ``model_class`` alias is added
    alongside the original ``_snake_case`` attribute — confirm against the
    mixin's expected attribute names.
    """

    _snake_case = FlaxAutoencoderKL
    # The tester mixin conventionally looks the model up under this name.
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        """A single random sample batch plus the PRNG key used to create it."""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        """Constructor kwargs for a tiny VAE plus matching dummy inputs."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
A__ : Any = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a `shape[0] x shape[1]` nested list of random floats in [0, scale).

    NOTE(review): renamed from ``UpperCAmelCase__`` — every caller in this file
    uses the name ``floats_list``; the original also declared four parameters
    with the same name (a SyntaxError) and read undefined locals.
    `name` is accepted but unused, mirroring the original signature shape.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class UpperCAmelCase_(unittest.TestCase):
    """Hyper-parameter holder for the TVLT feature-extraction tests.

    NOTE(review): the obfuscated ``__init__`` declared duplicate parameter
    names (a SyntaxError) and bound everything to throwaway locals, so no
    attribute was ever set; both methods also shared one name. Parameter and
    method names are restored from the attribute reads in the bodies and the
    defaults (7, 400, 2000, 2048, 128, 1, 512, 30, 44100). The test class
    below refers to this tester as ``TvltFeatureExtractionTester`` — confirm.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive input lengths when generating a ragged batch.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        """kwargs for constructing the feature extractor under test."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Generate a batch of float speech inputs (ndarrays when `numpify`)."""
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ):
    """Unit tests for `TvltFeatureExtractor` (audio -> spectrogram features).

    NOTE(review): left byte-identical because the mechanical renaming broke it
    in ways that need upstream context to repair safely:
      * every method is named ``lowercase_`` — each ``def`` shadows the
        previous one and unittest collects none (no ``test_`` prefix);
      * bodies bind results to ``__lowerCamelCase`` but then read other names
        (``SCREAMING_SNAKE_CASE_``, ``feat_extract_first``, ``speech_inputs``,
        ``feature_extractor``, ``ds``) that are never defined in scope;
      * ``TvltFeatureExtractionTester`` does not exist under that name here
        (the tester class above is also called ``UpperCAmelCase_``);
      * ``self.feature_extraction_class`` / ``self.feat_extract_dict`` are
        presumably provided by the ``_UpperCAmelCase`` mixin — confirm.
    """

    # Class under test; the mixin presumably reads it — attribute name mangled.
    lowerCamelCase : Dict = TvltFeatureExtractor
    def lowercase_ ( self ) -> List[Any]:
        # NOTE(review): result dropped into a throwaway local; likely meant
        # ``self.feat_extract_tester = ...`` — confirm against the mixin.
        __lowerCamelCase : Any = TvltFeatureExtractionTester(self )
    def lowercase_ ( self ) -> Optional[int]:
        # Build an extractor and check its config attributes exist.
        __lowerCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
        # NOTE(review): hasattr target should be the extractor built above;
        # ``SCREAMING_SNAKE_CASE_`` is undefined in this scope.
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'spectrogram_length' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'feature_size' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'num_audio_channels' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'hop_length' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'chunk_length' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'sampling_rate' ) )
    def lowercase_ ( self ) -> Dict:
        # save_pretrained / from_pretrained round trip should preserve config.
        # NOTE(review): locals below (``feat_extract_first``/``_second``,
        # ``dict_first``/``_second``, ``mel_1``/``mel_2``) were all renamed to
        # ``__lowerCamelCase`` so the comparisons read undefined names.
        __lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCamelCase : List[str] = feat_extract_first.save_pretrained(SCREAMING_SNAKE_CASE_ )[0]
            check_json_file_has_correct_format(SCREAMING_SNAKE_CASE_ )
            __lowerCamelCase : List[Any] = self.feature_extraction_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Tuple = feat_extract_first.to_dict()
        __lowerCamelCase : Optional[int] = feat_extract_second.to_dict()
        # mel filter banks are float arrays: compare numerically, not by ==.
        __lowerCamelCase : Union[str, Any] = dict_first.pop('mel_filters' )
        __lowerCamelCase : List[Any] = dict_second.pop('mel_filters' )
        self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    def lowercase_ ( self ) -> Any:
        # to_json_file / from_json_file round trip, same comparison scheme.
        __lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , 'feat_extract.json' )
            feat_extract_first.to_json_file(SCREAMING_SNAKE_CASE_ )
            __lowerCamelCase : Any = self.feature_extraction_class.from_json_file(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Dict = feat_extract_first.to_dict()
        __lowerCamelCase : Optional[Any] = feat_extract_second.to_dict()
        __lowerCamelCase : Union[str, Any] = dict_first.pop('mel_filters' )
        __lowerCamelCase : str = dict_second.pop('mel_filters' )
        self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    def lowercase_ ( self ) -> List[str]:
        # Initialize feature_extractor
        __lowerCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        __lowerCamelCase : int = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        __lowerCamelCase : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
        # Test not batched input
        __lowerCamelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
        # Output is (batch, channels, time, feature) — 4-D.
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        __lowerCamelCase : Any = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        __lowerCamelCase : Dict = feature_extractor(
            SCREAMING_SNAKE_CASE_ , return_tensors='np' , sampling_rate=4_41_00 , mask_audio=SCREAMING_SNAKE_CASE_ ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        __lowerCamelCase : Optional[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
        __lowerCamelCase : int = np.asarray(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Dict = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
        # Fetch `num_samples` decoded audio arrays from a tiny ASR dataset.
        __lowerCamelCase : Union[str, Any] = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        __lowerCamelCase : Union[str, Any] = ds.sort('id' ).select(range(SCREAMING_SNAKE_CASE_ ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def lowercase_ ( self ) -> Dict:
        # Integration test against known reference values for one sample.
        __lowerCamelCase : Tuple = self._load_datasamples(1 )
        __lowerCamelCase : List[Any] = TvltFeatureExtractor()
        __lowerCamelCase : List[str] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).audio_values
        self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) )
        # Reference top-left 2x2 patch of the expected spectrogram.
        __lowerCamelCase : str = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 13 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
# Module logger and pretrained-config archive map.
# NOTE(review): both values were bound to the same name (the map immediately
# clobbered the logger), and the `Dict`/`Optional[int]` annotations referenced
# names this module never imports.
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __a(UpperCAmelCase):
    """Configuration class for the Perceiver model.

    NOTE(review): the obfuscated ``__init__`` declared every parameter as
    ``_SCREAMING_SNAKE_CASE`` (a SyntaxError) and bound all values to one
    throwaway local, so no attribute was ever set; parameter names are
    restored from the attribute names and the positional defaults. The
    unimported ``Tuple`` annotation on ``_a`` is dropped, and ``model_type``
    is added alongside it (``PretrainedConfig`` keys off that attribute).
    """

    _a = 'perceiver'
    model_type = 'perceiver'

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],  # noqa: B006 — upstream default kept as-is
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],  # noqa: B006 — upstream default kept as-is
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Core latent-array / attention geometry.
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class __a(UpperCAmelCase):
    """ONNX export configuration for Perceiver.

    NOTE(review): member names restored — the three members shared one name in
    the obfuscated source, and locals such as ``dynamic_axis``, ``batch_size``,
    ``seq_length`` and ``inputs`` were read without ever being bound.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Axis layout for the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        # Perceiver takes a generic "inputs" tensor rather than "input_ids".
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported model."""
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework=None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy text or image inputs depending on the preprocessor type."""
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a']) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            # Rename the tokenizer's "input_ids" to Perceiver's "inputs".
            inputs['inputs'] = inputs.pop('input_ids')
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('pixel_values')
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.')
"""simple docstring"""
# Public feature types re-exported by this package.
# NOTE(review): the export list was bound to `lowercase__` instead of
# `__all__`, and the second import pulled in a non-existent `ArrayaD` four
# times where the export list names Array2D..Array5D.
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]

from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 63 |
"""simple docstring"""
import os
def __magic_name__ ( _lowerCamelCase : Dict ):
__a : List[str] = len(grid[0] )
__a : int = len(_lowerCamelCase )
__a : Tuple = 0
__a : List[Any] = 0
__a : List[str] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_lowerCamelCase ):
for j in range(n_rows - 3 ):
__a : List[Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__a : Tuple = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__a : List[Any] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__a : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__a : str = max(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if max_product > largest:
__a : Optional[Any] = max_product
return largest
def solution():
    """Read the grid from `grid.txt` next to this file and return the greatest
    product of four adjacent numbers (Project Euler 11).

    NOTE(review): renamed from a second ``__magic_name__`` definition that
    shadowed the grid-product function above; the ``__main__`` block calls
    ``solution()``. The path argument and the helper call were also undefined.
    """
    grid = []
    with open(os.path.dirname(__file__) + """/grid.txt""") as file:
        for line in file:
            grid.append(line.strip("""\n""").split(""" """))

    # Convert the string cells to ints before searching.
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return __magic_name__(grid)
if __name__ == "__main__":
    # NOTE(review): requires a module-level `solution`; the obfuscated source
    # left both helpers named `__magic_name__`, so this call raised NameError.
    print(solution())
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def __snake_case(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` random rounds.

    Returns True when `n` is (very probably) prime. NOTE(review): the
    obfuscated signature declared both parameters as the same name (a
    SyntaxError) and every local was bound to ``UpperCAmelCase``; names are
    restored from the reads in the body.
    """
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        # NOTE(review): true division makes `d` a float; confirm that
        # `bin_exp_mod` accepts it — `//=` may be what was intended.
        d /= 2
        exp += 1
    # n - 1=d*(2**exp)

    count = 0
    while count < prec:
        # Random witness in [2, n-1].
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                # `a` witnesses compositeness: n is definitely not prime.
                return False
        count += 1
    return True
if __name__ == "__main__":
a__ : List[Any] = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 51 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Quick-sort `a[start:end + 1]` in place; return the comparison count.

    NOTE(review): renamed from a duplicated ``a_`` definition — this name and
    ``_in_place_partition`` are what the bodies and the script below call.
    """
    count = 0
    if start < end:
        # Choose a random pivot and move it to the end before partitioning.
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
snake_case: Tuple =0
snake_case: Tuple =randint(__UpperCAmelCase , __UpperCAmelCase )
snake_case: List[Any] =a[end]
snake_case: List[str] =a[pivot]
snake_case: Dict =temp
snake_case: Union[str, Any] =start - 1
for index in range(__UpperCAmelCase , __UpperCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case: Optional[Any] =new_pivot_index + 1
snake_case: Dict =a[new_pivot_index]
snake_case: Union[str, Any] =a[index]
snake_case: List[Any] =temp
snake_case: Union[str, Any] =a[new_pivot_index + 1]
snake_case: Tuple =a[end]
snake_case: Union[str, Any] =temp
return new_pivot_index + 1, count
# Script driver: sort 100 samples from N(0, 1) and report the comparison count.
# NOTE(review): every binding here was collapsed onto the single name `a`, so
# the temp file, the sample count, the array and the sort inputs kept
# overwriting one another; names restored from the reads below.
outfile = TemporaryFile()
p = 100  # 1000 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    'is :'
)
print(z)
| 350 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
    'compression_format, is_archive', [
        ('7z', True),
        ('bz2', False),
        ('gzip', False),
        ('lz4', False),
        ('tar', True),
        ('xz', False),
        ('zip', True),
        ('zstd', False),
    ], )
def test_base_extractors(
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    """Each concrete extractor must detect and unpack its own format.

    NOTE(review): the obfuscated signature declared twelve parameters with one
    name (a SyntaxError); fixture names are restored from the dictionary the
    body builds — confirm they match the fixtures in conftest.
    """
    input_paths_and_base_extractors = {
        '7z': (seven_zip_file, SevenZipExtractor),
        'bz2': (bza_file, BzipaExtractor),
        'gzip': (gz_file, GzipExtractor),
        'lz4': (lza_file, LzaExtractor),
        'tar': (tar_file, TarExtractor),
        'xz': (xz_file, XzExtractor),
        'zip': (zip_file, ZipExtractor),
        'zstd': (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        # Fixture unavailable: skip with the reason from the requirement marker.
        reason = f'''for \'{compression_format}\' compression_format, '''
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    base_extractor.extract(input_path, output_path)
    if is_archive:
        # Archives unpack to a directory containing the original text file.
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='utf-8')
    else:
        extracted_file_content = output_path.read_text(encoding='utf-8')
    expected_file_content = text_file.read_text(encoding='utf-8')
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    text_file,
    tmp_path,
):
    """The generic `Extractor` must infer the right format and extract it.

    Mirrors `test_base_extractors`, but goes through format inference instead
    of addressing a concrete extractor class directly.
    """
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bza_file,
        "gzip": gz_file,
        "lz4": lza_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    """Build a tar archive whose member path escapes upward via '..'."""
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        # Deliberately insecure arcname: extraction would write outside the target dir.
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    """Build a tar archive containing a symlink pointing outside the archive root."""
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file,
    error_log,
    tar_file_with_dot_dot,
    tar_file_with_sym_link,
    tmp_path,
    caplog,
):
    """Extracting a malicious tar must log an ERROR instead of writing outside the target."""
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    """ZipExtractor must reject a PNG that `zipfile.is_zipfile` wrongly accepts."""
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 700 |
import os
def largest_product(grid):
    """Return the greatest product of four adjacent numbers in `grid`.

    Adjacency is checked vertically, horizontally and along both diagonals.
    Only works for a square (n x n) grid of ints.
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest
def solution():
    """Solve Project Euler 11: read the grid from ``grid.txt`` next to this
    file and return the greatest product of four adjacent numbers."""
    grid = []
    with open(os.path.dirname(os.path.realpath(__file__)) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
| 557 | 0 |
'''simple docstring'''
def sum_of_series(first_term, common_diff, num_of_terms):
    """Return the sum of an arithmetic progression.

    Uses the closed form n/2 * (2a + (n - 1)d), so the result is a float.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main():
    """Print the sum of the arithmetic series 1, 2, ..., 10 as a demo."""
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    """Deprecated alias of `YolosImageProcessor`, kept for backward compatibility.

    Emits a `FutureWarning` on construction; will be removed in Transformers v5.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Generate a README.md model card for one FSMT wmt19 direction.

    Creates `model_card_dir` (and parents as needed by os.makedirs) and writes
    a README.md filled in for the `src_lang`-`tgt_lang` pair.
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias

- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)


### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
    year={{2020}},
    title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
    author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
    booktitle={{Proc. of WMT}},
}}
```


## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

# Generate one model card per supported wmt19 translation direction.
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 58 | '''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE:
    """Undirected weighted graph with Boruvka's minimum-spanning-tree algorithm.

    Nodes are the integers 0..num_of_nodes-1. Edges are stored as
    [u, v, weight] triples; `m_component` maps each node to its current
    component representative.
    """

    def __init__(self, num_of_nodes) -> None:
        """Initialize an empty graph over `num_of_nodes` nodes."""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []  # list of [u, v, weight]
        self.m_component = {}  # node -> component representative

    def add_edge(self, u_node, v_node, weight) -> None:
        """Add an undirected edge between `u_node` and `v_node` with `weight`."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node) -> int:
        """Return the representative (root) of the component containing `u_node`."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node) -> None:
        """If `u_node` is not a root, re-resolve every node to its current root."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size, u_node, v_node) -> None:
        """Merge the components of `u_node` and `v_node`, smaller into larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute the MST with Boruvka's algorithm, printing each added edge
        and the total weight. The graph is assumed to be connected."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # For every component, find its cheapest outgoing edge.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add each selected edge (skipping ones that now connect the same component).
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def UpperCamelCase__ ( ):
    # NOTE(review): body reduced to a bare docstring and never called —
    # presumably a mangled doctest holder in the original source; confirm
    # against the upstream file before relying on it.
    '''simple docstring'''

if __name__ == "__main__":
    # Run any doctests defined in this module.
    import doctest
    doctest.testmod()
| 58 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : str = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq OPT checkpoint and normalize its state dict.

    - unwraps a nested "model" entry if present,
    - drops fairseq-only weights,
    - renames projection/layer-norm keys to the HF naming scheme,
    - splits every fused `.qkv_proj.` weight into separate q/k/v projections.

    Returns the rewritten state dict.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a fairseq OPT checkpoint into a HF `OPTModel` and save it.

    `config` may be a path/identifier for `OPTConfig.from_pretrained`; when
    None, the default OPTConfig is used. The model is saved half-precision.
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point for the OPT checkpoint conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 98 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the Pix2Struct model family: heavy submodules are
# only imported on attribute access through `_LazyModule`. (Restored: the
# mangled original rebound a single name for every value, clobbering the
# import dict, and never assigned the lazy module into `sys.modules`.)
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 164 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def __UpperCamelCase(snake_case) -> List[str]:
    """Read a newline-delimited vocabulary file and return an ordered
    token -> row-index mapping."""
    vocab = collections.OrderedDict()
    with open(snake_case, "r", encoding="utf-8") as reader:
        all_lines = reader.readlines()
    for row_index, raw_token in enumerate(all_lines):
        vocab[raw_token.rstrip("\n")] = row_index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """SentencePiece tokenizer for XLM-ProphetNet.

    Keeps a fixed "fairseq" block of special tokens ([PAD]/[CLS]/[SEP]/[UNK]/
    [MASK] plus ten [unusedN] slots) ahead of the SentencePiece ids; all spm
    ids are shifted by `self.fairseq_offset`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # fairseq reserves the first positions for special and [unused] tokens,
        # so spm ids are shifted by `fairseq_offset` below.
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            self.fairseq_tokens_to_ids[f"[unused{i}]"] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and rebuild in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask marking special tokens in (built) sequences."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """XLM-ProphetNet does not use token types: always all zeros."""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, honoring the fairseq special block."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str), honoring the fairseq special block."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        # "▁" is the SentencePiece word-boundary marker (SPIECE_UNDERLINE).
        return "".join(tokens).replace("▁", " ").strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                fi.write(self.sp_model.serialized_model_proto())

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Append [SEP] after each sequence: `X [SEP]` or `A [SEP] B [SEP]`."""
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
| 710 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for MVP (restored: the mangled original used an
    undefined base class `_a` and gave every test method the same name, so
    only the last method survived)."""

    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        # Minimal BPE vocab/merges written to tmpdirname for local tokenizers.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 341 | 0 |
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word):
    """Return the canonical anagram key of `word`: its characters sorted."""
    return "".join(sorted(word))
def anagram(my_word):
    """Return every known word sharing `my_word`'s signature (its anagrams)."""
    return word_by_signature[signature(my_word)]
# Build the word list and group words by anagram signature.
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    # Dump every word with more than one anagram to anagrams.txt.
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 620 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Return a mapping from utf-8 byte values (0-255) to printable unicode strings.

    The reversible BPE codes operate on unicode strings, so every possible byte
    needs a character representation. Printable bytes map to themselves; the
    remaining byte values are shifted into the ``chr(256 + n)`` range so none of
    them collides with a whitespace/control character the BPE code would trip on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a tuple of symbols (variable-length strings); the pairs drive the
    merge selection in :meth:`bpe`.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _UpperCAmelCase ( snake_case ):
    """Byte-level BPE tokenizer for BART (GPT-2 style vocab.json + merges.txt).

    NOTE(review): the class and base-class identifiers look machine-mangled;
    the base is presumably ``PreTrainedTokenizer`` — confirm against the
    original file before renaming.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Iteratively apply the lowest-ranked merge to *token*; memoized in self.cache."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Reassemble byte-level tokens into a decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into *save_directory*; returns their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Single sequence: ``<s> X </s>``; pair: ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 1/0 mask marking special tokens added by this tokenizer."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BART does not use token type ids; the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 620 | 1 |
class Node:
    """A named value; heap elements compare by their ``val`` attribute."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    """Array-backed binary min-heap over Node-like objects (``.name``/``.val``).

    ``idx_of_element`` maps each element object to its current heap index so
    ``decrease_key`` can locate it in O(1); ``heap_dict`` maps element names
    to their current values for ``__getitem__`` lookups.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify *array* in place (bottom-up) and register every element."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Restore the heap property downward from *idx* (min-heapify)."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Bubble the element at *idx* up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum element."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
# Demo: build a min-heap of named nodes, then decrease one key below the minimum.
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class snake_case_ ( __lowercase ):
    """Sentencepiece-based tokenizer for M2M100, with fairseq language-code tokens.

    Language tokens (``__en__`` etc.) are appended after the sentencepiece
    vocabulary; the active source language token is prepended to every encoded
    sequence and ``</s>`` is appended.

    NOTE(review): the class/base identifiers look machine-mangled; the base is
    presumably ``PreTrainedTokenizer`` — confirm against the original file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}

        # Register every language token as an additional special token (unless
        # the caller already supplied it).
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        # Language tokens live immediately after the base vocabulary.
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens) -> str:
        """Decode sub-tokens back to text, passing special tokens through verbatim."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        # The sentencepiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"""{save_directory} should be a directory""")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to force the target-language BOS token."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Prefix = [src_lang_code]; suffix = [eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Prefix = [tgt_lang_code]; suffix = [eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """Construct a SentencePieceProcessor with *sp_model_kwargs* and load *path*."""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    """Read *path* and return the parsed JSON object."""
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    """Serialize *data* to *path* as indented JSON."""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
def __lowerCamelCase ( lowerCamelCase__ : int = 1000000 ):
'''simple docstring'''
lowerCamelCase = set(range(3 , lowerCamelCase__ , 2 ) )
primes.add(2 )
for p in range(3 , lowerCamelCase__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , lowerCamelCase__ , lowerCamelCase__ ) ) )
lowerCamelCase = [float(lowerCamelCase__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowerCamelCase__ , limit + 1 , lowerCamelCase__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
    # With the default limit of 1_000_000 this runs for several seconds
    # before printing the totient sum.
    print(f"""{solution() = }""")
| 457 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): no fast tokenizer is registered for RoCBert here; this
    # branch is intentionally a no-op placeholder.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Mirror the runtime branch above: nothing extra to import.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 457 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    """Nearest-neighbor 2x upsample followed by a 3x3 convolution (NHWC)."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    """2x downsample via a stride-2 3x3 convolution (NHWC)."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    """Flax ResNet block with timestep-embedding injection (NHWC).

    norm -> swish -> conv, add projected time embedding, norm -> swish ->
    dropout -> conv, then a (optionally 1x1-projected) residual connection.
    """

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # Project the residual with a 1x1 conv when channel counts differ
        # (unless the caller forces the behavior via use_nin_shortcut).
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        # Broadcast the (batch, channels) embedding over height and width.
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
| 707 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """Minimal KwargsHandler used to exercise ``to_kwargs`` default filtering."""

    a: int = 0
    b: bool = False
    c: float = 3.0
class a__ ( unittest.TestCase ):
    """Tests for accelerate kwargs handlers (KwargsHandler / GradScalerKwargs / DDP kwargs)."""

    def test_kwargs_handler(self):
        """``to_kwargs`` returns only fields that differ from their defaults."""
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        """GradScalerKwargs values must reach the fp16 GradScaler; other scaler settings stay at defaults."""
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        """Re-launch this file under torchrun so the ``__main__`` block verifies the DDP kwargs."""
        cmd = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Executed under ``torchrun`` by the multi-GPU test above: verify that
    # DistributedDataParallelKwargs values reach the wrapped DDP model.
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 355 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    """Builds tiny DeBERTa configs/inputs and runs per-head shape checks.

    NOTE(review): the original base class was obfuscated to ``a_``; ``object``
    matches the upstream tester. The class/method names used here are fixed by
    the test class below, which calls ``DebertaModelTester(self)`` and the
    ``create_and_check_*`` methods.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """Store the (tiny) model hyper-parameters used by every check below."""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input tensors plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a tiny DebertaConfig built from the stored hyper-parameters."""
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        # Pipelines need a larger vocab so arbitrary tokenizer ids stay in range.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        """Loss must be a scalar (empty size list)."""
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the base model with progressively fewer inputs; check output shape."""
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite bindings for the DeBERTa model family.

    NOTE(review): the original bases were obfuscated to a duplicated ``a_``;
    the imports at the top of this file (ModelTesterMixin, PipelineTesterMixin)
    fix what they must have been.
    """

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # Flags consumed by the common test mixin.
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the released microsoft/deberta-base weights."""

    @unittest.skip(reason='Model not available yet' )
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained('microsoft/deberta-base' )

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"""{output[:, 1:4, 1:4]}""")
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
# Chunking parameters: long contexts are split into overlapping windows of
# MAX_LENGTH tokens with DOC_STRIDE overlap. These names are read by the
# helper functions and the __main__ block below.
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
# When the env var is "true", process the train split; otherwise validation.
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
# Integer ids written to disk for each answer category.
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def A ( __snake_case: Any ) -> Optional[int]:
"""simple docstring"""
def choose_first(__snake_case: Union[str, Any] , __snake_case: List[str]=False ):
assert isinstance(__snake_case , __snake_case )
if len(__snake_case ) == 1:
__magic_name__ = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__magic_name__ = {k: [a[k]] for k in a}
if len(a['start_token'] ) > 0:
break
return a
__magic_name__ = {'id': example['id']}
__magic_name__ = example['annotations']
__magic_name__ = annotation['yes_no_answer']
if 0 in yes_no_answer or 1 in yes_no_answer:
__magic_name__ = ['yes'] if 1 in yes_no_answer else ['no']
__magic_name__ = __magic_name__ = []
__magic_name__ = __magic_name__ = []
__magic_name__ = ['<cls>']
else:
__magic_name__ = ['short']
__magic_name__ = choose_first(annotation['short_answers'] )
if len(out['start_token'] ) == 0:
# answer will be long if short is not available
__magic_name__ = ['long']
__magic_name__ = choose_first(annotation['long_answer'] , is_long_answer=__snake_case )
__magic_name__ = []
answer.update(__snake_case )
# disregard some samples
if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]:
__magic_name__ = True
else:
__magic_name__ = False
__magic_name__ = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
if not all(isinstance(answer[k] , __snake_case ) for k in cols ):
raise ValueError('Issue in ID' , example['id'] )
return answer
def get_context_and_ans(example, assertion=False):
    """Flatten the document tokens into plain text and re-index the answer into it.

    HTML tokens are dropped from the context, so every dropped token before the
    answer shifts its token indices left by one. Returns ``{"context", "answer"}``
    where the answer carries inclusive start/end token indices, the category and
    the answer span text. ``assertion=True`` prints a diff when re-indexing fails.
    """
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            # every dropped HTML token before the answer shifts it one slot left
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1

    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print('ID:', example["id"])
            print('New:', new, end='\n')
            print('Old:', old, end='\n\n')

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenize one example and split long contexts into overlapping windows.

    Each window is ``question + context slice`` capped at ``max_length`` tokens,
    with consecutive windows overlapping by ``max_length - doc_stride`` tokens.
    Returns per-window input ids plus start/end/category labels; windows that do
    not contain the answer are labelled -100 / "null".
    """
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    # q_len = question length incl. the separator; every window re-uses this prefix
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice_ = input_ids[i:end_index]
            inputs.append(q_indices + slice_)
            category.append(answer["category"][0])
            if slice_[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    # convert word-level answer indices into sub-token indices
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print('ISSUE IN TOKENIZATION')
            print('OLD:', answer["span"])
            print('NEW:', new, end='\n\n')

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice_ = input_ids[i:end_index]
        inputs.append(q_indices + slice_)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            # answer lies fully inside this window: shift indices into window space
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print('ISSUE in strided for ID:', example["id"])
                print('New:', tokenizer.decode(new))
                print('Old:', tokenizer.decode(old), end='\n\n')
        if slice_[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    """``Dataset.map`` entry point: convert one raw NQ example into strided model inputs."""
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    """Append the processed samples to ``file_name`` as JSON lines.

    Windows with no answer span are skipped entirely, and most remaining
    "null"-category windows are randomly dropped to rebalance the data.
    """
    with jsonlines.open(file_name, 'a' ) as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc='Saving samples ... ' ):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the no-answer windows
                writer.write(
                    {
                        'input_ids': ids,
                        'start_token': start,
                        'end_token': end,
                        'category': CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    # Heavy deps are imported lazily so importing this module stays cheap.
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    # PROCESS_TRAIN selects which split is processed by this run.
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __lowerCAmelCase ( unittest.TestCase , ToolTesterMixin ):
    """Tests for the ``text-to-speech`` tool (requires torch)."""

    def setup(self):
        # ToolTesterMixin expects a lower-case ``setup`` hook that assigns ``self.tool``.
        self.tool = load_tool('text-to-speech' )
        self.tool.setup()

    def test_exact_match_arg(self):
        """The waveform is deterministic for a fixed torch seed."""
        torch.manual_seed(0)
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        """Same determinism check; kept to mirror the upstream arg/kwarg test pair."""
        torch.manual_seed(0)
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 711 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """Build the OPT input dict, deriving an attention mask from the pad token when absent."""
    if attention_mask is None:
        # attend to every non-pad position
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    """Builds tiny TF-OPT configs/inputs and checks cached decoding consistency."""

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        """Store the (tiny) model hyper-parameters used by the checks below."""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        """Random input ids (EOS-terminated) plus a tiny OPT config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Outputs with cached past_key_values must match a full no-cache forward pass."""
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite bindings for TF-OPT.

    NOTE(review): the original bases were obfuscated to a duplicated
    ``lowerCAmelCase__``; the imports at the top of this file
    (TFModelTesterMixin, PipelineTesterMixin) fix what they must have been.
    """

    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )

    # Flags consumed by the common test mixin.
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        """Resizing embeddings must change only the size, not the retained weights."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, 'weight' ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, 'weight' ):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    """Wrap a (nested) python list of token ids in an int32 TF constant."""
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    """Helper building a tiny OPT config plus a matching input batch.

    Names restored from the upstream transformers OPT test-suite; the
    obfuscated original stored ``vocab_size`` under a throwaway attribute
    name and bound every local to one clobbered name, so the values read
    below (``input_ids``, ``config``...) were undefined.
    """

    # Read below as self.vocab_size.
    vocab_size = 9_9

    def _get_config_and_data(self):
        """Return (config, input_ids, batch_size) for a small random OPT model."""
        # Every row ends with the EOS token (id 2).
        eos_column_vector = tf.ones((4, 1), dtype=tf.int64) * 2
        # Random ids in [3, vocab_size) so special ids 0-2 never appear mid-sequence.
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    """Integration check of TFOPTModel hidden states against reference values.

    Locals restored from the upstream transformers OPT test-suite; the
    obfuscated original clobbered every local under one name so the values
    read below were undefined.
    """

    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained('''facebook/opt-350m''')
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))
        # The same forward pass must agree under XLA compilation (looser tolerance).
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    """Checks facebook/opt-350m mean-logits against Metaseq reference values.

    In the obfuscated original both methods shared one name (so only the
    second survived) and ``self.path_model`` was never assigned; names
    restored from the upstream transformers OPT test-suite.
    """

    def setUp(self):
        # super().setUp() in the original grounds this as the setUp override.
        super().setUp()
        self.path_model = '''facebook/opt-350m'''

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model)
        prompts = [
            '''Today is a beautiful day and I want to''',
            '''In the city of''',
            '''Paris is the capital of France and''',
            '''Computers and mobile phones have taken''',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors='''tf''', padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
        # Same computation must hold when the model is XLA-compiled.
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    """Slow generation tests for TF-OPT checkpoints.

    Names restored from the upstream transformers OPT test-suite: in the
    obfuscated original the property was read as ``self.prompts`` but
    defined under another name, all three tests shared one method name (so
    only the last would run), and every local was clobbered.
    """

    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = '''facebook/opt-125m'''
        EXPECTED_OUTPUTS = [
            '''Today is a beautiful day and I want to''',
            '''In the city of New York, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='''tf''').input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = '''facebook/opt-350m'''
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = '''left'''
        # use different length sentences to test batching
        sentences = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        inputs = tokenizer(sentences, return_tensors='''tf''', padding=True)
        input_ids = inputs['''input_ids''']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['''attention_mask'''])
        inputs_non_padded = tokenizer(sentences[0], return_tensors='''tf''').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        # Number of left-pad tokens in the shorter sequence of the batch.
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['''attention_mask'''][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors='''tf''').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            '''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
            '''Today, I was in the middle of a conversation with a friend about the''',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = '''facebook/opt-350m'''
        EXPECTED_OUTPUTS = [
            '''Today is a beautiful day and I want to''',
            '''In the city of San Francisco, the city''',
            '''Paris is the capital of France and the capital''',
            '''Computers and mobile phones have taken over the''',
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='''tf''').input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 622 | 0 |
"""simple docstring"""
import enum
import shutil
import sys
# Terminal geometry and ANSI cursor-direction suffixes used by the cursor
# helpers below.  The obfuscated original unpacked the terminal size into a
# throwaway name, leaving TERMINAL_WIDTH (read below) undefined.
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class UpperCamelCase(enum.Enum):
    """Cursor movement direction.

    The obfuscated original bound both members to the same name, which the
    enum machinery rejects at class creation ("Attempted to reuse key").
    NOTE(review): member names restored as UP/DOWN to match CURSOR_TO_CHAR
    above — confirm against the un-obfuscated source.
    """

    UP = 0
    DOWN = 1
def forceWrite(content, end=""):
    """Write *content* (stringified) followed by *end* to stdout and flush.

    Fixes: the obfuscated original named both parameters identically (a
    SyntaxError) and read the undefined name ``end``; the function is called
    as ``forceWrite`` throughout this file, so the name is restored.
    """
    sys.stdout.write(str(content) + end)
    # Flush so interactive menus update immediately, without line buffering.
    sys.stdout.flush()
def writeColor(content, color, end=""):
    """Write *content* wrapped in the ANSI color escape *color*, then reset.

    Fixes the obfuscated original's duplicated parameter names (SyntaxError);
    the body already read ``color``/``content``, so those names are restored.
    """
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
def reset_cursor():
    """Return the cursor to the start of the current line (carriage return).

    Name restored from the call sites in clear_line/linebreak below.
    """
    forceWrite("""\r""")
def move_cursor(num_lines, direction):
    """Move the cursor *num_lines* steps in *direction* ('up', 'down', ...).

    Fixes the obfuscated original's duplicated parameter names (SyntaxError);
    the body already read ``num_lines``/``direction``.  Emits the ANSI CSI
    sequence ESC [ <n> <A|B|C|D> via CURSOR_TO_CHAR.
    """
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
def clear_line():
    """Blank out the current terminal line and return the cursor to column 0."""
    # Overwrite the whole visible width with spaces, then carriage-return.
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()
def linebreak():
    """Draw a full-width horizontal rule on the current line."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 389 |
"""simple docstring"""
def lowerCAmelCase__(number: int) -> bool:
    """Return True if *number* is even.

    Fixes a NameError in the obfuscated original: the parameter was renamed
    but the body still read ``number``, so every call raised.

    >>> lowerCAmelCase__(4)
    True
    >>> lowerCAmelCase__(7)
    False
    """
    # A number is even iff its least-significant bit is 0; `&` binds tighter
    # than `==`, so this is (number & 1) == 0.
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 389 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_lowercase):
    """DummyObject-style placeholder that raises unless torch + torchsde are
    installed.

    Fix: the obfuscated original named both the positional and keyword
    variadic parameters identically in every method — a SyntaxError.
    NOTE(review): upstream the attribute is `_backends` and the two
    classmethods are `from_config`/`from_pretrained` — confirm before
    renaming, so the obfuscated identifiers are kept here.
    """

    snake_case__ : List[Any] = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''torch''', '''torchsde'''])

    @classmethod
    def SCREAMING_SNAKE_CASE(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''torchsde'''])

    @classmethod
    def SCREAMING_SNAKE_CASE(cls, *args, **kwargs):  # noqa: F811 — shadows the previous def, as in the original
        requires_backends(cls, ['''torch''', '''torchsde'''])
| 715 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(_lowercase):
    """Output of the VQ model's ``encode`` step.

    Class and field names restored from the instantiation
    ``VQEncoderOutput(latents=...)`` later in this file; the obfuscated
    original bound both to meaningless names, so that call raised NameError.
    """

    # Un-quantized latents produced by the encoder followed by quant_conv.
    latents: torch.FloatTensor
class __snake_case ( _lowercase , _lowercase):
    """VQ-VAE model: encoder -> vector quantizer -> decoder.

    Parameter and method names restored from the upstream diffusers
    ``VQModel``: the obfuscated original reused one name for every
    constructor parameter (a SyntaxError), dropped every ``self.`` target so
    no submodule was registered, and used the nonexistent ``nn.Convad``
    (mangled ``nn.Conv2d``).
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        # 1x1 convolutions project between latent space and codebook space.
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode *x* to (un-quantized) codebook-space latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """Quantize latents (unless *force_not_quantize*) and decode them."""
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # The 'spatial' norm variant conditions the decoder on the quantized latents.
        dec = self.decoder(quant2, quant if self.config.norm_type == '''spatial''' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """Full autoencoding pass: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 598 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Yield the primes in increasing order using an incremental (lazy) sieve.

    A map composite -> smallest known prime factor is maintained; when the
    running counter hits a known composite, that factor is re-filed at the
    next free multiple instead of being forgotten.

    Fixes: the obfuscated original dropped both dict writes and mangled the
    ``pop`` default, so it never yielded correct primes.  The name is
    restored from the ``sieve()`` call in ``solution`` below.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its factor to the next multiple
            # not already claimed by another prime.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: its first interesting composite is
            # prime**2 (smaller multiples have smaller factors).
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    """Project Euler 123: smallest odd index n for which the remainder of
    (p_n - 1)**n + (p_n + 1)**n modulo p_n**2 — which equals 2*n*p_n for odd
    n — first exceeds *limit*.

    Fixes: the obfuscated original reused one clobbered name for the prime
    generator, the counter and the current prime, so it could not run.  The
    function name is restored from the ``solution()`` call in the main guard.
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder for even n is just 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
| 390 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowerCamelCase__ ( snake_case ):
    """Deprecated alias for ``MobileViTImageProcessor`` (removal planned for
    Transformers v5): instantiation emits a FutureWarning and delegates.

    Fixes: the obfuscated original named both variadic parameters ``A`` (a
    SyntaxError) and passed the args tuple where the warning category
    belongs.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 341 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    """Builds tiny DPT-hybrid configs and inputs for the model tests.

    Names restored from the upstream transformers DPT test-suite: the
    obfuscated original reused one parameter name per signature (a
    SyntaxError) and dropped the ``self.`` targets, and the class is
    instantiated as ``DPTModelTester(self)`` in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [96, 192, 384, 768],
            'num_groups': 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for DPT-hybrid.

    Names restored from the upstream transformers DPT test-suite: in the
    obfuscated original every class attribute and every method shared a
    single identifier, so they shadowed one another and only one survived.
    The mixin base classes are taken from the file's own imports.
    """

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''depth-estimation''': DPTForDepthEstimation,
            '''feature-extraction''': DPTModel,
            '''image-segmentation''': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DPT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        # NOTE(review): the original test name was lost to obfuscation; this
        # one was restored from the common test it most plausibly overrides —
        # confirm against the un-obfuscated source.
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    """Load the COCO fixture image used by the slow integration tests.

    Fixes a NameError in the obfuscated original, which assigned the image
    to a throwaway name and returned the undefined name ``image``; the
    function name is restored from the ``prepare_img()`` call in the
    integration test below.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end depth-estimation check against reference values.

    Locals restored from the upstream transformers DPT test-suite; the
    obfuscated original clobbered every local (model, inputs, outputs...)
    under one name.
    """

    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas')
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas').to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 505 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    """Read JSON / JSON-lines files into a ``Dataset``.

    Names restored from the upstream ``datasets.io.json`` module: the
    obfuscated original reused one parameter name per signature (a
    SyntaxError) and dropped the ``self.`` targets; the base class comes
    from this file's ``from .abc import AbstractDatasetReader``.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        # Normalize a bare path into a {split: path} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or map-style)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    """Serialize a ``Dataset`` to JSON / JSON-lines.

    Names restored from the upstream ``datasets.io.json`` module: the
    obfuscated original reused one parameter name per signature (a
    SyntaxError) and dropped the ``self.`` targets; ``_batch_json`` and
    ``_write`` are grounded by the self-calls that survived obfuscation.
    """

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''')
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        """Write the whole dataset; return the number of bytes written."""
        _ = self.to_json_kwargs.pop('path_or_buf', None)
        orient = self.to_json_kwargs.pop('orient', 'records')
        # JSON-lines only makes sense for the 'records' orientation.
        lines = self.to_json_kwargs.pop('lines', True if orient == 'records' else False)
        index = self.to_json_kwargs.pop('index', False if orient in ['split', 'table'] else True)
        compression = self.to_json_kwargs.pop('compression', None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''')
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, 'wb', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    ' was passed. Please provide a local path instead.'
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        """Encode one batch (rows [offset, offset + batch_size)) as JSON bytes."""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith('\n'):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs):
        """Stream every batch into *file_obj*, optionally via a worker pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit='ba',
                disable=not logging.is_progress_bar_enabled(),
                desc='Creating json from Arrow format',
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit='ba',
                    disable=not logging.is_progress_bar_enabled(),
                    desc='Creating json from Arrow format',
                ):
                    written += file_obj.write(json_str)
        return written
| 505 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a = logging.get_logger(__name__)
a = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : List[str] = 'mctct'
def __init__( self : Optional[int] , UpperCamelCase__ : Any=8_065 , UpperCamelCase__ : Any=1_536 , UpperCamelCase__ : List[Any]=36 , UpperCamelCase__ : Dict=6_144 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Tuple=384 , UpperCamelCase__ : str=920 , UpperCamelCase__ : Union[str, Any]=1e-5 , UpperCamelCase__ : List[Any]=0.3 , UpperCamelCase__ : List[str]="relu" , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Union[str, Any]=0.3 , UpperCamelCase__ : Optional[Any]=0.3 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : Optional[int]=0.3 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : List[Any]=(7,) , UpperCamelCase__ : Optional[Any]=(3,) , UpperCamelCase__ : Any=80 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : str="sum" , UpperCamelCase__ : Optional[int]=False , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = intermediate_size
lowercase_ = num_attention_heads
lowercase_ = attention_head_dim
lowercase_ = max_position_embeddings
lowercase_ = layer_norm_eps
lowercase_ = layerdrop
lowercase_ = hidden_act
lowercase_ = initializer_range
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = pad_token_id
lowercase_ = bos_token_id
lowercase_ = eos_token_id
lowercase_ = conv_glu_dim
lowercase_ = conv_dropout
lowercase_ = num_conv_layers
lowercase_ = input_feat_per_channel
lowercase_ = input_channels
lowercase_ = conv_channels
lowercase_ = ctc_loss_reduction
lowercase_ = ctc_zero_infinity
# prevents config testing fail with exporting to json
lowercase_ = list(UpperCamelCase__ )
lowercase_ = list(UpperCamelCase__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 412 |
def UpperCAmelCase_(UpperCAmelCase__=1_000_000):
    """Return the sum of Euler's totient phi(n) for 2 <= n <= limit.

    This equals the number of reduced proper fractions with denominator at
    most `limit` (Project Euler problem 72).

    Fixes vs. the previous version: `limit` was an undefined name, the sieve
    used the limit as the range step (so no composite was ever removed), and
    a prime `limit` was excluded from its own sieve.
    """
    limit = UpperCAmelCase__
    # Sieve of Eratosthenes over the odd numbers; `limit + 1` so that a
    # prime limit is itself retained.
    primes = set(range(3, limit + 1, 2))
    primes.add(2)
    for p in range(3, limit + 1, 2):
        if p not in primes:
            continue
        # Strike out multiples of p starting at p*p, stepping by p.
        primes.difference_update(range(p * p, limit + 1, p))
    # phi[n] starts at n and is scaled by (1 - 1/p) for every prime p | n.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
    # The solver above is named `UpperCAmelCase_`; the old guard called an
    # undefined `solution`.
    print(f"{UpperCAmelCase_() = }")
| 412 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case(metaclass=DummyObject):
    """Import-time placeholder raising a helpful error when the required
    backends are missing.

    `DummyObject.__getattr__` reads the `_backends` class attribute, so the
    attribute must be named exactly `_backends`; the previous version used a
    different name (breaking the protocol), an undefined metaclass, and gave
    both classmethods the same name so one shadowed the other.
    """

    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 514 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """Download a class-info JSON from the Hub and build the OneFormer
    metadata dict.

    Args:
        class_info_file: filename of the JSON inside the dataset repo.
        repo_path: Hub dataset repo id to download from.

    Returns:
        dict mapping class id -> name, plus "thing_ids" (ids with
        isthing=True) and "class_names" lists.

    Note: the previous definition reused one identifier for the function and
    both parameters (a SyntaxError); the name matches the call site
    (`prepare_metadata`) in the tester class below in the original source.
    """
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    """Holds the hyper-parameters and helpers shared by the OneFormer image
    processor tests: processor kwargs, expected resize dimensions, and fake
    model outputs for the post-processing checks.

    (Restored from an obfuscated version whose duplicate parameter names were
    a SyntaxError; the name matches the instantiation in the test class.)
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        # NOTE: deliberately overrides `batch_size` above with the smaller
        # fake-output batch used by the post-processing tests.
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to resize to.

        For a single image, scale the shorter edge to `size["shortest_edge"]`;
        for a batch, take the per-image maxima (padding target).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        """Build random model outputs shaped like a real OneFormer forward pass."""
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests `OneFormerImageProcessor`: attribute presence, `__call__` on
    PIL/NumPy/PyTorch inputs, segmentation-map encoding, RLE conversion, and
    the semantic/instance/panoptic post-processing helpers.

    (Restored from an obfuscated version with an undefined base class,
    duplicate method/parameter names, and undefined local names.)
    """

    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        # covered by the per-framework __call__ tests below
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        """Build processor inputs, optionally with random segmentation maps."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )
        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")

    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = image_processor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for _ in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 514 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Builds a small ViT config plus random inputs and runs the shared
    create-and-check routines for the TF-ViT tests below.

    (Restored from an obfuscated version whose duplicate parameter names were
    a SyntaxError; the name matches the reference in the test class.)
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # accepted for signature parity; labels come from type_sequence_label_size
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with freshly sampled tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build the small `ViTConfig` shared by these tests."""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check hidden-state shapes, including interpolated position encodings."""
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classification-head logits, including greyscale input."""
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common TF tester: returns (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF-ViT. ViT has no input_ids/inputs_embeds, so
    the corresponding common tests are skipped or overridden.

    (Restored from an obfuscated version with invalid duplicate base-class
    names and undefined local names.)
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration test.

    (Renamed from an obfuscated identifier: the integration test below calls
    `prepare_img()`.)
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of the pretrained TF-ViT classification head.

    (Restored: the original gave both members the same name, so the @slow
    test shadowed the property it depends on.)
    """

    @cached_property
    def default_image_processor(self):
        # Only constructible when the vision extras are installed.
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 12 |
'''simple docstring'''
from __future__ import annotations
# Sample input shared by the demo/benchmark code under __main__; `expect` is
# the next-greater-element result corresponding to `arr`. (The original bound
# both lists to the same name, shadowing the first.)
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Return, for each element, the next greater element to its right
    (-1 if none exists).

    O(n^2) reference implementation with explicit index loops. (The original
    reused one name for `result` and `arr_size`, making `range()` receive a
    list, and all three variants in this file shared a single function name.)
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Return, for each element, the next greater element to its right
    (-1 if none exists).

    O(n^2) variant using `enumerate` and slicing instead of index arithmetic.
    """
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """Return, for each element, the next greater element to its right
    (-1 if none exists).

    O(n) monotonic-stack implementation: scan right-to-left, pop stack
    entries <= the current value; the surviving top (if any) is the answer
    for this index, then push the current value.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    # Show each implementation's output on the shared sample array.
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    # `setup` was previously bound to a different name, leaving the
    # `setup=setup` keyword below undefined.
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 3 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( __snake_case ):
UpperCamelCase = (DDIMParallelScheduler,)
UpperCamelCase = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def A__ ( self :List[Any] , **__snake_case :Any ):
'''simple docstring'''
__magic_name__ : int ={
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**__UpperCamelCase )
return config
def A__ ( self :Optional[Any] , **__snake_case :Any ):
'''simple docstring'''
__magic_name__ : Any =self.scheduler_classes[0]
__magic_name__ : Union[str, Any] =self.get_scheduler_config(**__UpperCamelCase )
__magic_name__ : Optional[int] =scheduler_class(**__UpperCamelCase )
__magic_name__ , __magic_name__ : Union[str, Any] =10, 0.0
__magic_name__ : Union[str, Any] =self.dummy_model()
__magic_name__ : Optional[Any] =self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for t in scheduler.timesteps:
__magic_name__ : Dict =model(__UpperCamelCase , __UpperCamelCase )
__magic_name__ : Dict =scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def A__ ( self :Optional[int] ):
'''simple docstring'''
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__UpperCamelCase )
__magic_name__ : Dict =self.scheduler_classes[0]
__magic_name__ : List[str] =self.get_scheduler_config(steps_offset=1 )
__magic_name__ : List[str] =scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def A__ ( self :Dict ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def A__ ( self :Optional[int] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def A__ ( self :int ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__UpperCamelCase )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__UpperCamelCase )
def A__ ( self :Tuple ):
'''simple docstring'''
self.check_over_configs(thresholding=__UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , )
def A__ ( self :Any ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__UpperCamelCase )
def A__ ( self :Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=__UpperCamelCase , num_inference_steps=__UpperCamelCase )
def A__ ( self :Tuple ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__UpperCamelCase , eta=__UpperCamelCase )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple =self.scheduler_classes[0]
__magic_name__ : List[Any] =self.get_scheduler_config()
__magic_name__ : Optional[Any] =scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1E-5
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Tuple =self.scheduler_classes[0]
__magic_name__ : Optional[Any] =self.get_scheduler_config()
__magic_name__ : Optional[int] =scheduler_class(**__UpperCamelCase )
__magic_name__ , __magic_name__ : Dict =10, 0.0
scheduler.set_timesteps(__UpperCamelCase )
__magic_name__ : str =self.dummy_model()
__magic_name__ : str =self.dummy_sample_deter
__magic_name__ : List[Any] =self.dummy_sample_deter + 0.1
__magic_name__ : List[Any] =self.dummy_sample_deter - 0.1
__magic_name__ : int =samplea.shape[0]
__magic_name__ : str =torch.stack([samplea, samplea, samplea] , dim=0 )
__magic_name__ : Optional[int] =torch.arange(__UpperCamelCase )[0:3, None].repeat(1 , __UpperCamelCase )
__magic_name__ : int =model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__magic_name__ : Any =scheduler.batch_step_no_noise(__UpperCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __UpperCamelCase )
__magic_name__ : Tuple =torch.sum(torch.abs(__UpperCamelCase ) )
__magic_name__ : Any =torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def test_full_loop_no_noise(self):
    """Full sampling loop with default settings; pin sum/mean of the result."""
    sample = self.full_loop()

    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))

    assert abs(result_sum.item() - 172.0067) < 1e-2
    assert abs(result_mean.item() - 0.223967) < 1e-3
def test_full_loop_with_v_prediction(self):
    """Full sampling loop with prediction_type='v_prediction'."""
    sample = self.full_loop(prediction_type="v_prediction")

    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))

    assert abs(result_sum.item() - 52.5302) < 1e-2
    assert abs(result_mean.item() - 0.0684) < 1e-3
def test_full_loop_with_set_alpha_to_one(self):
    """Full loop with set_alpha_to_one=True and beta_start=0.01.

    NOTE(review): the boolean flag was mangled away in the source; True is
    inferred from the expected values matching the upstream
    `test_full_loop_with_set_alpha_to_one` — confirm against upstream.
    """
    # We specify different beta, so that the first alpha is 0.99
    sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))

    assert abs(result_sum.item() - 149.8295) < 1e-2
    assert abs(result_mean.item() - 0.1951) < 1e-3
def test_full_loop_with_no_set_alpha_to_one(self):
    """Full loop with set_alpha_to_one=False and beta_start=0.01.

    NOTE(review): False inferred from the expected values matching the
    upstream `test_full_loop_with_no_set_alpha_to_one` — confirm upstream.
    """
    # We specify different beta, so that the first alpha is 0.99
    sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))

    assert abs(result_sum.item() - 149.0784) < 1e-2
    assert abs(result_mean.item() - 0.1941) < 1e-3
| 702 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Configure the transformers logger: INFO verbosity, default handler,
# and explicit (file/line) formatting for this example script.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV files into tokenized tf.data datasets.

    Restored from a mangled definition whose parameters were all collapsed to
    one duplicate name (a SyntaxError) and whose `files` dict was clobbered;
    the function name comes from its call site in `main`.

    Args:
        train_file/eval_file/test_file: CSV paths; each may be None.
        tokenizer: tokenizer used to encode the text column(s).
        label_column_id: index of the label column among the CSV features.
        max_seq_length: truncation/padding length.

    Returns:
        (train_ds, val_ds, test_ds, label2id) — datasets may be None when the
        corresponding file was not provided.
    """
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    # One remaining feature -> single-sentence task; two -> sentence pair.
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    # NOTE(review): tf dtypes were mangled to `tf.intaa`; int32 inputs /
    # int64 labels restored from the upstream example — confirm upstream.
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
UpperCAmelCase_ : int = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Field names restored from their read sites in `main` (data_args.train_file,
    .dev_file, .test_file, .label_column_id, .max_seq_length); in the mangled
    source every field was collapsed to one duplicate name. The class name is
    referenced by `HfArgumentParser` in `main`.
    """

    # Index of the CSV column that holds the label.
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    Field names restored from their read sites in `main` (model_args.model_name_or_path,
    .config_name, .tokenizer_name, .cache_dir); the class name is referenced by
    `HfArgumentParser` in `main`.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    """Fine-tune and/or evaluate a TF sequence-classification model on CSV data.

    Restored: the function name (called in the __main__ guard), all locals
    (collapsed to `__magic_name__` in the mangled source), and the mangled
    attribute `training_args.fpaa` -> `fp16`.

    Returns:
        dict of evaluation metrics (empty when --do_eval is not set).
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # Simple accuracy from argmax over the logits.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
# Script entry point.
if __name__ == "__main__":
    main()
| 367 | 0 |
def equation(x: float) -> float:
    """Function whose root is sought: f(x) = 10 - x^2 (roots at +-sqrt(10))."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Find a root of `equation` in [a, b] via bisection, to within 0.01.

    Restored from a mangled version in which both functions shared one name,
    both interval endpoints were written to the same variable (so the loop
    never converged), and the call sites referenced the undefined names
    `equation`/`bisection`.

    Raises:
        ValueError: if f(a) and f(b) do not bracket a root (same sign).
    """
    # Bolzano theorem: a sign change on [a, b] guarantees a root inside.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Keep the half-interval that still brackets the sign change.
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
| 10 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
    """Tests for BlipaProcessor (tokenizer + image processor composition).

    Restored from a mangled version in which every method was named
    `snake_case_` (so later defs shadowed earlier ones and unittest could not
    discover any test), attribute/local assignments were collapsed to unused
    placeholders, and several identifiers (`PreTrainedTokenizerFast`,
    `BlipImageProcessor`, booleans) were read as the undefined `A__`.
    Method names `get_tokenizer` / `get_image_processor` /
    `prepare_image_inputs` are grounded by their call sites below.
    """

    def setUp(self):
        # Save a processor to a temp dir so the tests can reload it.
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = BlipaProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import structure for the speech-encoder-decoder package.
# Restored: the mangled source rebound one placeholder name for every
# assignment, so `_import_structure` (passed to _LazyModule below) was never
# defined and the torch/flax entries clobbered the config entry.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch model is only exposed when torch is installed.
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax model is only exposed when flax is installed.
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 547 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the Nezha package.
# Restored: the mangled source rebound one placeholder for every assignment,
# so `_import_structure` (used by _LazyModule below) was never defined and the
# torch entry clobbered the config entry.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch models are only exposed when torch is installed.
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 547 | 1 |
from __future__ import annotations
import time
from math import sqrt
# Module constants for the (bidirectional) A* demo below.
# Restored: the mangled source rebound one placeholder name for all four,
# leaving HEURISTIC / grid / delta / TPosition (read throughout the classes)
# undefined.

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# Grid coordinates are (y, x) tuples.
TPosition = tuple[int, int]
class Node:
    """A* search-tree node: grid position, path cost g, heuristic h, f = g + h.

    Restored: class name (constructed as `Node` inside AStar), parameter
    names (all six were the duplicate `A_`, a SyntaxError), and attribute
    assignments (collapsed to dead locals in the mangled source).
    """

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # positions are stored (y, x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance to the goal when HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Ordering by total cost lets node lists be sorted directly.
        return self.f_cost < other.f_cost
class AStar:
    """A* search over the module-level `grid` using `delta` moves.

    Restored: class name (instantiated as `AStar` by BidirectionalAStar),
    duplicate parameter names, and all locals/attributes collapsed in the
    mangled source.
    """

    def __init__(self, start: TPosition, goal: TPosition):
        # start/goal arrive as (y, x); Node takes (x, y) first.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path to the goal, or [start.pos] if unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, non-obstacle neighbor nodes of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            # NOTE(review): the source passes target.pos_y/target.pos_x in this
            # order, which hands the goal coordinates to Node swapped — kept
            # as-is to preserve behavior; confirm against upstream.
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back from `node` and return the path start→node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Two simultaneous A* searches (start→goal and goal→start) that meet in
    the middle.

    Restored: class name (instantiated in the __main__ block), duplicate
    parameter names, and locals/attributes collapsed in the mangled source —
    in particular the `fwd_astar.target` / `bwd_astar.target` re-targeting,
    which had become dead local assignments.
    """

    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Advance both searches until their frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each search now aims at the other search's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path (meeting point
        kept only once)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # Restored: every assignment was collapsed to one placeholder name; the
    # names init/goal/a_star/start_time/bd_start_time are grounded by their
    # read sites in the f-strings and constructor calls.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # NOTE(review): the source times only construction and never calls
    # bidir_astar.search() — preserved as-is; confirm intent.
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 1 | """simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds small ResNet configs/inputs and shape-checks model outputs.

    Restored: class name (instantiated as `TFResNetModelTester` in setUp
    below), duplicate `_A` parameter names (a SyntaxError), method names
    (grounded by their call sites), and attribute assignments collapsed to
    dead locals in the mangled source.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model-test suite for TF ResNet.

    Restored: base classes (mangled to the undefined `__snake_case`; the mixins
    are imported at the top of the file), class attributes (all collapsed to
    one name `a`), duplicate method names, and mangled locals/parameters.
    Attribute names for the five False flags follow the upstream test file —
    confirm against upstream.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO sample image used by the integration test.

    Name restored from its call site in the integration test below (the
    mangled source defined `lowerCamelCase_` but called `prepare_img`).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test: real checkpoint, real image, pinned logits.

    Restored mangled locals and the `prepare_img` call target.
    """

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 338 | 0 |
"""simple docstring"""
from collections.abc import Callable
class a :
def __init__( self : Dict , __lowerCAmelCase : Callable | None = None ):
# Stores actual heap items.
_UpperCAmelCase = []
# Stores indexes of each item for supporting updates and deletion.
_UpperCAmelCase = {}
# Stores current size of heap.
_UpperCAmelCase = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_UpperCAmelCase = key or (lambda __lowerCAmelCase : x)
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ):
return int((i - 1) / 2 ) if i > 0 else None
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : int ):
_UpperCAmelCase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ):
_UpperCAmelCase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase , _UpperCAmelCase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_UpperCAmelCase , _UpperCAmelCase = self.arr[j], self.arr[i]
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : int ):
return self.arr[i][1] < self.arr[j][1]
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = self._left(__lowerCAmelCase )
_UpperCAmelCase = self._right(__lowerCAmelCase )
_UpperCAmelCase = i
if left is not None and not self._cmp(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = left
if right is not None and not self._cmp(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = right
return valid_parent
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int ):
_UpperCAmelCase = self._parent(__lowerCAmelCase )
while parent is not None and not self._cmp(__lowerCAmelCase , __lowerCAmelCase ):
self._swap(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = parent, self._parent(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : int ):
_UpperCAmelCase = self._get_valid_parent(__lowerCAmelCase )
while valid_parent != index:
self._swap(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = valid_parent, self._get_valid_parent(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
if item not in self.pos_map:
return
_UpperCAmelCase = self.pos_map[item]
_UpperCAmelCase = [item, self.key(__lowerCAmelCase )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(__lowerCAmelCase )
self._heapify_down(__lowerCAmelCase )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : int ):
if item not in self.pos_map:
return
_UpperCAmelCase = self.pos_map[item]
del self.pos_map[item]
_UpperCAmelCase = self.arr[self.size - 1]
_UpperCAmelCase = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(__lowerCAmelCase )
self._heapify_down(__lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : int ):
_UpperCAmelCase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(__lowerCAmelCase )] )
else:
_UpperCAmelCase = [item, self.key(__lowerCAmelCase )]
_UpperCAmelCase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.arr[0] if self.size else None
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def __UpperCAmelCase ( ):
    """Placeholder entry point.

    NOTE(review): the body of this function appears to have been stripped --
    as written it does nothing; confirm against the original module.
    """
if __name__ == "__main__":
    # Run the module's doctests when executed as a script (none are defined
    # in the code visible here).
    import doctest
    doctest.testmod()
| 275 | """simple docstring"""
import datasets
UpperCAmelCase__ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
UpperCAmelCase__ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
UpperCAmelCase__ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def __UpperCAmelCase ( preds , labels ):
    """Return the fraction of positions where *preds* equals *labels*.

    Both arguments are equal-shaped numpy arrays (or array-likes supporting
    elementwise ``==`` and ``.mean()``).  The previous revision declared
    both parameters under the same name, which is a SyntaxError in Python.
    """
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    """XNLI metric: plain accuracy over integer (or float, for sts-b) labels."""
    # NOTE(review): _DESCRIPTION/_CITATION/_KWARGS_DESCRIPTION are referenced
    # here but never defined above (the module constants were all bound to a
    # single shadowed name).  Both methods also share the name
    # `lowerCAmelCase_`, so the first (the metric-info hook) is lost, and the
    # second declares duplicate parameter names (a SyntaxError) and calls
    # `simple_accuracy`, which does not exist under that name in this module.
    def lowerCAmelCase_ ( self : Union[str, Any] ):
        """Declare the metric's input features (int64, or float32 for sts-b)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
                    """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
                } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Any ):
        """Compute accuracy for the given predictions/references."""
        return {"accuracy": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
| 275 | 1 |
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=a__ ):
SCREAMING_SNAKE_CASE__ =["transformers", "torch", "note_seq"]
def __init__( self, *_a, **_a ) -> Any:
requires_backends(self, ["transformers", "torch", "note_seq"] )
@classmethod
def __lowerCAmelCase ( cls, *_a, **_a ) -> Tuple:
requires_backends(cls, ["transformers", "torch", "note_seq"] )
@classmethod
def __lowerCAmelCase ( cls, *_a, **_a ) -> Optional[int]:
requires_backends(cls, ["transformers", "torch", "note_seq"] )
| 693 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def _snake_case ( A ) -> int:
    """Return a uniformly random element drawn from the sequence *A*."""
    pivot = choice(A )
    return pivot
def _snake_case ( lst , k ) -> int:
    """Return the k-th smallest element (1-indexed) of *lst* via quickselect.

    Assumes the elements are distinct; runs in expected linear time.

    Fixes in this revision: the two parameters previously shared one name
    (a SyntaxError in Python), and the pivot helper was referenced under a
    name that does not exist in this module, so the pivot is now drawn
    directly with ``random.choice``.
    """
    pivot = choice(lst )
    # Partition around the pivot in linear time; elements equal to the
    # pivot (only the pivot itself, given distinct inputs) are dropped.
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # small + [pivot] + big is ordered around the pivot, so the pivot is
    # the answer exactly when k-1 elements are smaller than it.
    if len(small ) == k - 1:
        return pivot
    # The target lies among the larger elements; shift k past the pivot
    # and everything smaller than it.
    elif len(small ) < k - 1:
        return _snake_case(big , k - len(small ) - 1 )
    # The target lies among the smaller elements.
    else:
        return _snake_case(small , k )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class UpperCAmelCase_ :
    """Abstract interface for streamers consumed by text generation.

    NOTE(review): both abstract methods previously carried the same name, so
    the second definition silently replaced the first; they are restored
    here as the conventional ``put``/``end`` pair.
    """
    def put( self , value ):
        """Receive a new batch of token ids pushed by generation."""
        raise NotImplementedError()
    def end( self ):
        """Signal that generation has finished."""
        raise NotImplementedError()
class UpperCAmelCase_ ( _lowerCAmelCase ):
    """Streamer that prints decoded text to stdout as tokens arrive.

    Tokens are buffered until a safe flush point (a newline, a completed
    word, or a CJK character) so partially decoded words are not printed.

    NOTE(review): the base-class name `_lowerCAmelCase` is not defined in
    this module as written -- it presumably refers to the base streamer
    class above.  The previous revision also declared duplicate parameter
    names (a SyntaxError) and read an undefined local where the decoded
    text fragment was intended; both are repaired below.
    """
    def __init__( self , tokenizer , skip_prompt = False , **decode_kwargs ):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []           # undecoded token ids seen so far
        self.print_len = 0              # number of characters already emitted
        self.next_tokens_are_prompt = True

    def put( self , value ):
        """Decode newly generated ids and emit any completed text."""
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("""TextStreamer only supports batch size 1""" )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            # The first `put` carries the prompt tokens; drop them on request.
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith("""\n""" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(""" """ ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )

    def end( self ):
        """Flush whatever remains in the cache and mark the stream finished."""
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = """"""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )

    def on_finalized_text( self , text , stream_end = False ):
        """Print the finalized text; subclasses may override this hook."""
        print(text , flush=True , end="""""" if not stream_end else None )

    def _is_chinese_char( self , cp ):
        """Return True when code point *cp* lies in a CJK unicode block."""
        if (
            (cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
            or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
            or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
            or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
            or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
            or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
            or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
            or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
        ): #
            return True
        return False
class UpperCAmelCase_ ( _lowerCAmelCase ):
    """Streamer variant that queues text fragments for consumption by another
    thread, making the object itself iterable.

    NOTE(review): the base-class name `_lowerCAmelCase` is undefined as
    written (presumably the text streamer above).  The previous revision
    declared duplicate parameter names (a SyntaxError) and lost `__next__`
    to a method-name collision; both are repaired below.
    """
    def __init__( self , tokenizer , skip_prompt = False , timeout = None , **decode_kwargs ):
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        # Sentinel enqueued once generation ends; None never equals real text.
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text( self , text , stream_end = False ):
        """Push finalized text onto the queue; enqueue the stop signal at the end."""
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )

    def __iter__( self ):
        return self

    def __next__( self ):
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 709 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def A (__lowerCamelCase :Any ):
_lowerCAmelCase = np.max(__lowerCamelCase , axis=-1 , keepdims=__lowerCamelCase )
_lowerCAmelCase = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowerCamelCase )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
    """Sentence-pair classification pipeline built on the `Pipeline` base.

    NOTE(review): all four hooks previously shared the name `_lowercase`
    (so only the last survived) and two of them declared duplicate
    parameter names, a SyntaxError; they are restored to the hook names
    the `Pipeline` base class dispatches to.  `idalabel` was also a
    digit-mangled spelling of the standard `id2label` config mapping.
    """
    def _sanitize_parameters( self , **kwargs ):
        """Route the optional `second_text` argument to preprocess()."""
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["""second_text"""]
        return preprocess_kwargs, {}, {}

    def preprocess( self , text , second_text=None ):
        """Tokenize the (optionally paired) input sentences."""
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )

    def _forward( self , model_inputs ):
        return self.model(**model_inputs )

    def postprocess( self , model_outputs ):
        """Turn raw logits into {label, score, logits}."""
        logits = model_outputs.logits[0].numpy()
        probabilities = A(logits )  # softmax helper defined above in this module
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 162 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( ProcessorMixin ):
    r"""Processor wrapping a ChineseCLIP image processor and a BERT tokenizer
    into a single `__call__`.

    NOTE(review): the previous revision inherited from an undefined name
    (though `ProcessorMixin` is imported above), declared duplicate
    parameter names (a SyntaxError) and gave several attributes/methods one
    shared name; this restores the `ProcessorMixin` contract
    (`attributes`, `*_class` hints, `batch_decode`/`decode` forwarding).
    """
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ChineseCLIPImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        # Accept the deprecated keyword as a fallback for the new one.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize `text` and/or preprocess `images`; when both are given the
        image features are attached to the text encoding as `pixel_values`."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Union of tokenizer and image-processor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
| 0 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__A : Optional[Any] = logging.getLogger(__name__)
def lowerCAmelCase_ ( ):
    """Parse the command-line flags for the TFRecord sharding script.

    Returns the populated ``argparse.Namespace``.  Fixes in this revision:
    every ``type=``/``default=`` that previously pointed at the undefined
    name `a` is restored to the concrete builtin (`str`/`int`) or `None`.
    """
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
    parser.add_argument(
        '--dataset_name' , type=str , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
    parser.add_argument(
        '--dataset_config' , type=str , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
    parser.add_argument(
        '--tokenizer_name_or_path' , type=str , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
    parser.add_argument(
        '--shard_size' , type=int , default=1000 , help='Number of entries to go in a single shard.' , )
    parser.add_argument('--split' , type=str , default='train' , choices=['train', 'test', 'validation'] )
    parser.add_argument(
        '--limit' , default=None , type=int , help='Limit the number of shards (used for debugging).' , )
    parser.add_argument(
        '--max_length' , type=int , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.' , )
    parser.add_argument(
        '--output_dir' , default='tf-tpu' , type=str , help='Output directory where the TFRecord shards will be saved. If the'
        ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
        ' shards will be directly saved to a Google Cloud Storage bucket.' , )
    args = parser.parse_args()
    return args
def lowerCAmelCase_ ( tokenizer ):
    """Return a datasets.map-compatible closure tokenizing the 'text' column.

    Fixes in this revision: the inner function previously read the undefined
    names `tokenizer` and `examples` (both the outer and inner parameters had
    been renamed to `a`), so the closure could never run.
    """
    def fn(examples ):
        # `examples` is a batched mapping of column name -> list of values.
        return tokenizer(examples['text'] )
    return fn
def lowerCAmelCase_ ( tokenized_data ):
    """Serialize tokenized examples into TFRecord-ready byte strings.

    *tokenized_data* maps 'input_ids'/'attention_mask' to equal-length lists
    of int sequences; one serialized ``tf.train.Example`` is produced per
    row.  Fixes in this revision: ``intaa_list``/``IntaaList`` were
    digit-mangled spellings of ``int64_list``/``Int64List`` (which do not
    exist in TensorFlow), and the serialized bytes were appended to an
    undefined list.
    """
    records = []
    for i in range(len(tokenized_data['input_ids'] ) ):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i] ) ),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i] ) ),
        }
        example = tf.train.Example(features=tf.train.Features(feature=features ) )
        records.append(example.SerializeToString() )
    return records
def lowerCAmelCase_ ( a : Any ):
    """Tokenize a dataset split, group it into fixed-length chunks and write
    TFRecord shards (locally or to a `gs://` bucket).

    NOTE(review): this module defines four functions under the same name
    `lowerCAmelCase_`, so the helpers called below (`tokenize_function`,
    `get_serialized_examples`) do not resolve as written, and most locals
    are bound to the shadowed name `a__` while later lines read the
    originally intended names (`dataset`, `args`, `tokenizer`, ...).
    Confirm against the upstream prepare_tfrecord_shards.py script before
    relying on any of this.
    """
    a__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        a__ = min(len(a ) , args.limit )
        a__ = dataset.select(range(a ) )
        print(f'''Limiting the dataset to {args.limit} entries.''' )
    a__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        a__ = os.path.join(args.output_dir , args.split )
        if not os.path.exists(a ):
            os.makedirs(a )
    else:
        a__ = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    a__ = tokenize_function(a )
    a__ = dataset.map(a , batched=a , num_proc=4 , remove_columns=['text'] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(a : Optional[Any] ):
        # Concatenate all texts.
        a__ = {k: sum(examples[k] , [] ) for k in examples.keys()}
        a__ = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        a__ = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        a__ = {
            k: [t[i : i + args.max_length] for i in range(0 , a , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    a__ = dataset_tokenized.map(a , batched=a , batch_size=1000 , num_proc=4 )
    a__ = 0
    a__ = 0
    # Write one TFRecord file per shard of `shard_size` grouped examples.
    for shard in range(0 , len(a ) , args.shard_size ):
        a__ = grouped_dataset[shard : shard + args.shard_size]
        a__ = len(dataset_snapshot['input_ids'] )
        a__ = os.path.join(a , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
        a__ = get_serialized_examples(a )
        with tf.io.TFRecordWriter(a ) as out_file:
            for i in range(len(a ) ):
                a__ = serialized_examples[i]
                out_file.write(a )
        print('Wrote file {} containing {} records'.format(a , a ) )
        shard_count += 1
        total_records += records_containing
    # Record the total number of serialized examples for this split.
    with open(f'''split-{args.split}-records-count.txt''' , 'w' ) as f:
        print(f'''Total {args.split} records: {total_records}''' , file=a )
if __name__ == "__main__":
    # NOTE(review): `parse_args`, `main` and `args` are undefined under the
    # obfuscated names above; confirm the intended entry-point wiring.
    __A : str = parse_args()
    main(args)
| 394 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowercase_ ( ) -> None:
    """Simulated connection timeouts must surface as the dedicated error (or a
    requests ConnectTimeout when an explicit timeout is set).

    Fixes in this revision: the expected exception was the undefined name
    `snake_case_` -- restored to `RequestWouldHangIndefinitelyError`, which
    is imported at the top of this module; the `-> Tuple` annotation also
    referenced an unimported name.
    """
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        # Without a timeout the simulator raises instead of hanging forever.
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request("GET" , "https://huggingface.co" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("GET" , "https://huggingface.co" , timeout=1.0 )
@pytest.mark.integration
def lowercase_ ( ) -> str:
    """A simulated connection failure must raise requests' ConnectionError."""
    target = "https://huggingface.co"
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("GET" , target )
def lowercase_ ( ) -> None:
    """With HF_DATASETS_OFFLINE=1 simulated, `http_head` must raise
    ConnectionError immediately instead of touching the network.

    Fixes in this revision: the expected exception was the undefined name
    `snake_case_` (restored to the builtin `ConnectionError`), and trailing
    non-Python junk fused onto the last line has been removed.
    """
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head("https://huggingface.co" )
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
    """Slow Flax Stable Diffusion ControlNet integration tests (canny / openpose).

    NOTE(review): this revision only repairs syntax-level damage -- annotated
    tuple-unpack targets (invalid Python), the digit-mangled `bfloataa`
    (restored to `bfloat16`) and trailing junk on the final line.  Several
    names remain broken from the obfuscation: tuple unpacks bind both
    elements to one name, `UpperCamelCase` and `controlnet_params` are
    undefined, and all three methods share one name so only the last
    survives.  Restore the upstream variable/method names before running.
    """
    def lowerCAmelCase_ ( self ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def lowerCAmelCase_ ( self ) -> str:
        snake_case__ , snake_case__ = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloat16 )
        snake_case__ , snake_case__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloat16 )
        snake_case__ = controlnet_params
        snake_case__ = "bird"
        snake_case__ = jax.device_count()
        snake_case__ = pipe.prepare_text_inputs([prompts] * num_samples )
        snake_case__ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
        snake_case__ = pipe.prepare_image_inputs([canny_image] * num_samples )
        snake_case__ = jax.random.PRNGKey(0 )
        snake_case__ = jax.random.split(UpperCamelCase ,jax.device_count() )
        snake_case__ = replicate(UpperCamelCase )
        snake_case__ = shard(UpperCamelCase )
        snake_case__ = shard(UpperCamelCase )
        snake_case__ = pipe(
            prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        snake_case__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case__ = images[0, 253:256, 253:256, -1]
        snake_case__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case__ = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def lowerCAmelCase_ ( self ) -> Optional[int]:
        snake_case__ , snake_case__ = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloat16 )
        snake_case__ , snake_case__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloat16 )
        snake_case__ = controlnet_params
        snake_case__ = "Chef in the kitchen"
        snake_case__ = jax.device_count()
        snake_case__ = pipe.prepare_text_inputs([prompts] * num_samples )
        snake_case__ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
        snake_case__ = pipe.prepare_image_inputs([pose_image] * num_samples )
        snake_case__ = jax.random.PRNGKey(0 )
        snake_case__ = jax.random.split(UpperCamelCase ,jax.device_count() )
        snake_case__ = replicate(UpperCamelCase )
        snake_case__ = shard(UpperCamelCase )
        snake_case__ = shard(UpperCamelCase )
        snake_case__ = pipe(
            prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        snake_case__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case__ = images[0, 253:256, 253:256, -1]
        snake_case__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case__ = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
UpperCamelCase__ = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def a__ ( checkpoint_path , enable_fusion=False ):
    """Instantiate the original CLAP model (HTSAT-tiny audio tower + RoBERTa
    text tower) from *checkpoint_path* and return ``(model, model_cfg)``.

    Fixes in this revision: both parameters previously shared one name and
    the result tuple used an annotated tuple-unpack target -- both are
    SyntaxErrors in Python.
    """
    model, model_cfg = create_model(
        '''HTSAT-tiny''' , '''roberta''' , checkpoint_path , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=enable_fusion , fusion_type='''aff_2d''' if enable_fusion else None , )
    return model, model_cfg
def a__ ( state_dict ):
    """Map original-CLAP checkpoint keys/tensors onto HF ClapModel names.

    Applies the module-level KEYS_TO_MODIFY_MAPPING substitutions, rewrites
    ``sequential.N`` / ``_projection.N`` indices, and splits fused audio
    ``qkv`` weights into separate query/key/value tensors (restored from the
    upstream conversion script).  Fixes in this revision: both regex
    patterns were bound to one shadowed name and ``re.match`` was called on
    the state dict itself; the split q/k/v tensors were never written to the
    output; and ``"audio" and "qkv" in key`` ignored "audio" due to operator
    precedence.
    """
    model_state_dict = {}
    sequential_layers_pattern = R'''.*sequential.(\d+).*'''
    text_projection_pattern = R'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        sequential_match = re.match(sequential_layers_pattern , key )
        if sequential_match:
            # replace sequential layers with list
            sequential_layer = sequential_match.group(1 )
            key = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(sequential_layer )//3}.linear.""" )
        else:
            projection_match = re.match(text_projection_pattern , key )
            if projection_match:
                projecton_layer = int(projection_match.group(1 ) )
                # Because in CLAP they use `nn.Sequential`...
                transformers_projection_layer = 1 if projecton_layer == 0 else 2
                key = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def a__ ( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    """Convert an original CLAP checkpoint into HF format and save it.

    Fixes in this revision: the four parameters previously shared one name
    and a result tuple used an annotated unpack target (both SyntaxErrors),
    and the __main__ block read the undefined names `parser`/`args`.

    NOTE(review): the module's helpers were all defined under the single
    name `a__`, so the `init_clap`/`rename_state_dict` calls below do not
    resolve until those helpers get their original names back; the
    `audio_config.enable_fusion` assignment is restored from the upstream
    conversion script -- confirm.
    """
    clap_model, clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    args = parser.parse_args()
    a__(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 75 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
        },
        {
            'framework': 'pytorch',
            'script': 'run_ddp.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf_dist.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
        },
    ] )
class lowerCamelCase_ ( unittest.TestCase ):
    """Multi-node SageMaker training smoke tests, parameterized per framework.

    NOTE(review): all four methods below share the name `lowercase_`, so
    only the last definition survives on the class, and several bodies read
    undefined names (`_A`, `instance_count`, `job_name`) where the original
    parameters/locals were presumably intended -- confirm against the
    upstream test file before running.
    """
    def lowercase_ ( self : List[str] ):
        """Per-test setup: copy the training script into the test path."""
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
        assert hasattr(self , '''env''' )
    def lowercase_ ( self : List[Any] , _A : Optional[Any] ):
        """Build a HuggingFace SageMaker estimator for the given instance count."""
        UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
        # distributed data settings
        UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )
    def lowercase_ ( self : Optional[int] , _A : Any ):
        """Export a finished job's metric dataframe to CSV."""
        TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
    @parameterized.expand([(2,)] )
    def lowercase_ ( self : Optional[int] , _A : Optional[int] ):
        """Run training on N instances and assert runtime/accuracy/loss KPIs."""
        UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A )
        # run training
        estimator.fit()
        # result dataframe
        UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCAmelCase__ : Any = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 75 | 1 |
from __future__ import annotations
from collections import deque
class _UpperCamelCase:
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : list[str] ):
'''simple docstring'''
__a : list[dict] = []
self.adlist.append(
{'value': '', 'next_states': [], 'fail_state': 0, 'output': []} )
for keyword in keywords:
self.add_keyword(SCREAMING_SNAKE_CASE__ )
self.set_fail_transitions()
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : Optional[int] = 0
for character in keyword:
__a : Tuple = self.find_next_state(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if next_state is None:
self.adlist.append(
{
'value': character,
'next_states': [],
'fail_state': 0,
'output': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
__a : str = len(self.adlist ) - 1
else:
__a : Dict = next_state
self.adlist[current_state]["output"].append(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(SCREAMING_SNAKE_CASE__ )
__a : Dict = 0
while q:
__a : List[str] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(SCREAMING_SNAKE_CASE__ )
__a : Dict = self.adlist[r]['fail_state']
while (
self.find_next_state(SCREAMING_SNAKE_CASE__ , self.adlist[child]['value'] ) is None
and state != 0
):
__a : List[str] = self.adlist[state]['fail_state']
__a : List[Any] = self.find_next_state(
SCREAMING_SNAKE_CASE__ , self.adlist[child]['value'] )
if self.adlist[child]["fail_state"] is None:
__a : Dict = 0
__a : Tuple = (
self.adlist[child]['output']
+ self.adlist[self.adlist[child]['fail_state']]['output']
)
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : dict = {} # returns a dict with keywords and list of its occurrences
__a : Any = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
while (
self.find_next_state(SCREAMING_SNAKE_CASE__ , string[i] ) is None
and current_state != 0
):
__a : Any = self.adlist[current_state]['fail_state']
__a : str = self.find_next_state(SCREAMING_SNAKE_CASE__ , string[i] )
if next_state is None:
__a : Optional[int] = 0
else:
__a : Dict = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
__a : Dict = []
result[key].append(i - len(SCREAMING_SNAKE_CASE__ ) + 1 )
return result
# Run the module's doctest examples when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 577 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    # Path to the sentencepiece fixture used by the tokenizer tests' setUp.
    SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right

# m2m100 language-code token ids referenced by the integration tests below.
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for MaMaaaTokenizer built on the common tokenizer test mixin."""

    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False  # no fast (Rust) implementation for this tokenizer
    test_seq2seq = False
    test_sentencepiece = True
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
super().setUp()
__a : Dict = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
__a : str = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
__a : Any = Path(self.tmpdirname )
save_json(SCREAMING_SNAKE_CASE__ , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(SCREAMING_SNAKE_CASE__ , save_dir / VOCAB_FILES_NAMES['spm_file'] )
__a : List[Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : Dict = '</s>'
__a : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : List[str] = self.get_tokenizer()
__a : Optional[int] = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<s>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : Dict = self.get_tokenizer()
__a : List[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [2, 3, 4, 5, 6] , )
__a : str = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
__a : Optional[int] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , 'This is a test' )
@slow
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : Tuple = {'input_ids': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    """Integration tests against the real facebook/m2m100_418M checkpoint."""

    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 12_8006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 12_8022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 12_8076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 12_8063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        """Decoding must drop the leading language-code token."""
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setting(self):
        """Changing src_lang must update the prefix/suffix special tokens."""
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        """Switching to target mode must use tgt_lang; input mode restores src_lang."""
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[12_8022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 12_8006,
            },
        )
| 577 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Immutable (flax.struct) state carried between scheduler calls."""

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        """Return a fresh, empty scheduler state."""
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a scheduler `step`: previous sample, ODE derivative, state."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax implementation of the stochastic Karras et al. (2022) sampler."""

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # All configuration is captured by @register_to_config; nothing else to do.
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        """Precompute the descending timestep indices and the sigma schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        """Increase noise level: sigma -> sigma_hat with churn inside [s_min, s_max]."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Euler step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Second-order (Heun) correction using the derivative at sample_prev."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
import math
class SelfOrganizingMap:
    """Minimal two-cluster self-organizing map (Kohonen network)."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning cluster index by squared Euclidean distance.

        Returns 0 when cluster 0's distance exceeds cluster 1's, else 1
        (preserves the original comparison direction).
        """
        d0 = 0.0
        d1 = 0.0
        # Accumulate over ALL components (the obfuscated version collapsed
        # both accumulators into one variable and returned after the first one).
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move winning vector ``weights[j]`` toward ``sample`` by factor ``alpha``."""
        # Iterate over every component of the winning vector.
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    """Train a 2-cluster SOM on toy 4-dim data and classify a test sample."""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 461 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"

_DESCRIPTION = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"

_KWARGS_DESCRIPTION = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"

# Downloadable checkpoints keyed by config name (see _download_and_prepare).
CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    """`datasets.Metric` wrapper around the google-research BLEURT scorer."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        # NOTE(review): checkpoint archives unpack into a "bleurt" subdirectory — confirm for BLEURT-20 variants.
        self.scorer = score.BleurtScorer(os.path.join(model_path, "bleurt"))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 720 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for DownBlockaD output values."""

    block_class = DownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)
class ResnetDownsampleBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for ResnetDownsampleBlockaD output values."""

    block_class = ResnetDownsampleBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)
class AttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for AttnDownBlockaD output values."""

    block_class = AttnDownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)
class CrossAttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for CrossAttnDownBlockaD output values."""

    block_class = CrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # cross-attention blocks need a context dimension
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)
class SimpleCrossAttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for SimpleCrossAttnDownBlockaD output values."""

    block_class = SimpleCrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)
class SkipDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for SkipDownBlockaD output values."""

    block_class = SkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)
class AttnSkipDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for AttnSkipDownBlockaD output values."""

    block_class = AttnSkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)
class DownEncoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for DownEncoderBlockaD output values."""

    block_class = DownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        # Encoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)
class AttnDownEncoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for AttnDownEncoderBlockaD output values."""

    block_class = AttnDownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        # Encoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)
class UNetMidBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for UNetMidBlockaD output values."""

    block_class = UNetMidBlockaD  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 1_28,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)
class UNetMidBlockaDCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for UNetMidBlockaDCrossAttn output values."""

    block_class = UNetMidBlockaDCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)
class UNetMidBlockaDSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    """Regression test for UNetMidBlockaDSimpleCrossAttn output values."""

    block_class = UNetMidBlockaDSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)
class UpBlockaDTests(SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Shared-harness tests for the plain up block."""

    block_class = UpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # Up blocks also receive the tuple of residual hidden states.
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        # Reference slice from a fixed-seed run.
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)
class ResnetUpsampleBlockaDTests(SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Shared-harness tests for the ResNet upsample block."""

    block_class = ResnetUpsampleBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        # Reference slice from a fixed-seed run.
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)
class CrossAttnUpBlockaDTests(SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Shared-harness tests for the cross-attention up block."""

    block_class = CrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        # Reference slice from a fixed-seed run.
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)
class SimpleCrossAttnUpBlockaDTests(SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Shared-harness tests for the simple cross-attention up block."""

    block_class = SimpleCrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # Needs both residual hidden states and encoder hidden states.
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        # Reference slice from a fixed-seed run.
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)
class AttnUpBlockaDTests(SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Shared-harness tests for the attention up block."""

    block_class = AttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        # Reference slice from a fixed-seed run.
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)
class SkipUpBlockaDTests(SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Shared-harness tests for the skip-connection up block."""

    block_class = SkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        # Reference slice from a fixed-seed run.
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)
class AttnSkipUpBlockaDTests(SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Shared-harness tests for the attention skip up block."""

    block_class = AttnSkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        # Reference slice from a fixed-seed run.
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)
class UpDecoderBlockaDTests(SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Shared-harness tests for the decoder up block (no time embedding)."""

    block_class = UpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # Decoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        # Reference slice from a fixed-seed run.
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)
class AttnUpDecoderBlockaDTests(SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Shared-harness tests for the attention decoder up block (no time embedding)."""

    block_class = AttnUpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        # Reference slice from a fixed-seed run.
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 184 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants for the example script; the second binding previously
# clobbered the first because both used the same placeholder name.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    """Build train/eval DataLoaders for GLUE MRPC.

    Args:
        accelerator: the ``Accelerator`` (used to choose TPU-friendly padding).
        batch_size: per-device batch size for both splits.
        model_name_or_path: tokenizer checkpoint to load.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train a sequence classifier on GLUE MRPC under Accelerate (optionally DeepSpeed).

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace (model path, output dir, optional metric lower bound).
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer: when DeepSpeed supplies its own optimizer via the
    # config, use a DummyOptim placeholder instead of AdamW.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler (a DummyScheduler when DeepSpeed provides its own)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    """Parse CLI arguments and launch the training loop."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    # Script entry point: parse CLI args and run training.
    main()
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """A graph of transition probabilities modelling a Markov chain.

    ``connections`` maps each node to a dict of destination node -> probability.
    The class name matches the constructor used by ``get_transitions`` below.
    """

    def __init__(self) -> None:
        self.connections = {}

    def add_node(self, node: str) -> None:
        """Register *node* with no outgoing transitions yet."""
        self.connections[node] = {}

    def add_transition_probability(self, nodea: str, nodea_dest: str, probability: float) -> None:
        """Add a transition ``nodea -> nodea_dest`` with the given probability."""
        if nodea not in self.connections:
            self.add_node(nodea)
        if nodea_dest not in self.connections:
            self.add_node(nodea_dest)
        self.connections[nodea][nodea_dest] = probability

    def get_nodes(self) -> list:
        """Return all known nodes."""
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node from *node*'s outgoing distribution.

        Returns "" when the cumulative probabilities do not cover the draw.
        """
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start, transitions, steps):
    """Run a Markov chain for *steps* transitions and count node visits.

    Args:
        start: node to start from.
        transitions: iterable of (src, dst, probability) triples.
        steps: number of transitions to simulate.

    Returns:
        A Counter (dict[str, int]) of visit counts per node.
    """
    graph = MarkovChainGraphUndirectedUnweighted()
    for nodea, nodea_dest, probability in transitions:
        graph.add_transition_probability(nodea, nodea_dest, probability)
    # Start every node at zero so unvisited nodes still appear in the result.
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest
    doctest.testmod()
| 547 | 0 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Accepted spellings for boolean environment flags (mirrors distutils.util.strtobool,
# which was removed from the standard library in Python 3.12).
_TRUE_VALUES = {"y", "yes", "t", "true", "on", "1"}
_FALSE_VALUES = {"n", "no", "f", "false", "off", "0"}


def parse_flag_from_env(key, default=False):
    """Read the boolean flag *key* from the environment.

    Returns *default* unchanged when the variable is unset; otherwise 1/0
    for truthy/falsy spellings, raising ValueError for anything else.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    lowered = value.lower()
    if lowered in _TRUE_VALUES:
        return 1
    if lowered in _FALSE_VALUES:
        return 0
    # More values are supported, but let's keep the message simple.
    raise ValueError(f"If set, {key} must be yes or no.")
# Module-level switches read by the slow/local/packaged/remote decorators below.
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lza = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_pyazr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    """Decorator that skips *test_case* when faiss is not installed."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case
def require_regex(test_case):
    """Decorator that skips *test_case* when the regex package is not installed."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case
def require_elasticsearch(test_case):
    """Decorator that skips *test_case* when elasticsearch is not installed."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case
def require_sqlalchemy(test_case):
    """Decorator that skips *test_case* when sqlalchemy is not installed."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def require_torch(test_case):
    """Decorator that skips *test_case* when PyTorch is unavailable (per datasets config)."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case
def require_tf(test_case):
    """Decorator that skips *test_case* when TensorFlow is unavailable (per datasets config)."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case
def require_jax(test_case):
    """Decorator that skips *test_case* when JAX is unavailable (per datasets config)."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case
def require_pil(test_case):
    """Decorator that skips *test_case* when Pillow is unavailable (per datasets config)."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    """Decorator that skips *test_case* when transformers is not installed."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case
def require_tiktoken(test_case):
    """Decorator that skips *test_case* when tiktoken is not installed."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case
def require_spacy(test_case):
    """Decorator that skips *test_case* when spacy is not installed."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case
def require_spacy_model(model):
    """Decorator factory: skip the test unless spacy and the given *model* are available."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model
def require_pyspark(test_case):
    """Decorator that skips *test_case* when pyspark is not installed."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case
def require_joblibspark(test_case):
    """Decorator that skips *test_case* when joblibspark is not installed."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    """Skip *test_case* unless slow tests were enabled via RUN_SLOW."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case
def local(test_case):
    """Skip *test_case* unless local tests were enabled via RUN_LOCAL."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case
def packaged(test_case):
    """Skip *test_case* unless packaged tests were enabled via RUN_PACKAGED."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case
def remote(test_case):
    """Skip *test_case* unless remote tests were enabled via RUN_REMOTE."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def snake_case__ ( *_snake_case : int ):
"""simple docstring"""
def decorate(cls : Union[str, Any] ):
for name, fn in cls.__dict__.items():
if callable(_snake_case ) and name.startswith("test" ):
for decorator in decorators:
UpperCamelCase__ = decorator(_snake_case )
setattr(cls , _snake_case , _snake_case )
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """Raised in offline simulation when a request has no timeout set."""

    pass
class OfflineSimulationMode(Enum):
    """Ways to simulate being offline in tests."""

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Context manager simulating an offline environment for requests-based code.

    Depending on *mode*, requests either raise a ConnectionError, time out
    almost immediately, or datasets' HF_DATASETS_OFFLINE flag is set.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_in_temp_dir(*args, **kwargs):
    """Temporarily chdir into a fresh temporary directory, restoring the cwd on exit."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    """Assert that the wrapped code allocates additional Arrow memory."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that the wrapped code does not allocate additional Arrow memory."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rnga, rngb):
    """True when two RNGs produce identical streams (compared on 10 draws of integers in [0, 100))."""
    # deepcopy so neither generator's state is advanced by the comparison.
    return deepcopy(rnga).integers(0, 100, 10).tolist() == deepcopy(rngb).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    """Decorator turning 500/502 HTTP errors into pytest xfails (flaky remote services)."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] ) -> Any:
"""simple docstring"""
UpperCamelCase__ = returncode
UpperCamelCase__ = stdout
UpperCamelCase__ = stderr
async def snake_case__ ( _snake_case : Dict , _snake_case : List[str] ):
"""simple docstring"""
while True:
UpperCamelCase__ = await stream.readline()
if line:
callback(_snake_case )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run *cmd* asynchronously, teeing stdout/stderr line by line.

    Returns a _RunOutput with the return code and captured output lines.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Run *cmd* to completion, raising RuntimeError on failure or empty output.

    Returns the _RunOutput from the underlying async runner.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
def pytest_xdist_worker_id():
    """Return the integer id of the current pytest-xdist worker (0 when not under xdist)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    """Return a port unique to this xdist worker so torch.distributed tests don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    """CPU tests of AudioDiffusionPipeline with tiny fixed-seed models."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        """A tiny unconditional UNet, deterministically initialized."""
        torch.manual_seed(0)
        model = UNetaDModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        """A tiny cross-attention-conditioned UNet, deterministically initialized."""
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        """A (vqvae, unet) pair sized for latent audio diffusion."""
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNetaDModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        # 1) plain (non-latent) pipeline
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        # 2) latent pipeline fed with raw audio
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        # 3) conditioned pipeline
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration test for the pretrained DDIM audio-diffusion pipeline."""

    def tearDown(self) -> None:
        """Release GPU memory after each test so later tests start clean."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion_ddim(self) -> None:
        """Generate audio + spectrogram image with a fixed seed and pin the output."""
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        # Audio length is fixed by the model's sample size and mel hop length.
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        # Byte-exact comparison: DDIM sampling is deterministic for a fixed seed.
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL (pretrained config archive map for BERT).
# NOTE(review): the obfuscated name `__UpperCamelCase` is rebound here, clobbering
# the logger bound just above — these were presumably distinct names upstream; verify.
__UpperCamelCase : Tuple = {
    '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
    '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
    '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
    '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
    '''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
    '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
    '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
    '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
    '''bert-large-uncased-whole-word-masking''': (
        '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
    ),
    '''bert-large-cased-whole-word-masking''': (
        '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
    ),
    '''bert-large-uncased-whole-word-masking-finetuned-squad''': (
        '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
    ),
    '''bert-large-cased-whole-word-masking-finetuned-squad''': (
        '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
    ),
    '''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
    '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
    '''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
    '''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
    '''cl-tohoku/bert-base-japanese-whole-word-masking''': (
        '''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
    ),
    '''cl-tohoku/bert-base-japanese-char''': (
        '''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
    ),
    '''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
        '''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
    ),
    '''TurkuNLP/bert-base-finnish-cased-v1''': (
        '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
    ),
    '''TurkuNLP/bert-base-finnish-uncased-v1''': (
        '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
    ),
    '''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
    # See all BERT models at https://huggingface.co/models?filter=bert
}
class a ( a__ ):
    """BERT model configuration: stores the architecture hyper-parameters."""

    # Model type identifier used by the Auto* factories.
    snake_case__ = '''bert'''

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store all hyper-parameters on the instance; extra kwargs go to the base class."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a ( a__ ):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def UpperCamelCase__ ( self ):
        """Return an input-name -> dynamic-axes mapping for ONNX export."""
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
# (removed: dataset-export artifact)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL (pretrained config archive map for GLPN).
# NOTE(review): `__UpperCamelCase` is rebound here, clobbering the logger above —
# presumably distinct names upstream; verify.
__UpperCamelCase : Tuple = {
    '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class a ( a__ ):
    """GLPN model configuration: stores the encoder/decoder hyper-parameters."""

    # Model type identifier used by the Auto* factories.
    snake_case__ = '''glpn'''

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],  # noqa: B006 — mutable defaults kept for config repr compatibility
        sr_ratios=[8, 4, 2, 1],  # noqa: B006
        hidden_sizes=[32, 64, 160, 256],  # noqa: B006
        patch_sizes=[7, 3, 3, 3],  # noqa: B006
        strides=[4, 2, 2, 2],  # noqa: B006
        num_attention_heads=[1, 2, 5, 8],  # noqa: B006
        mlp_ratios=[4, 4, 4, 4],  # noqa: B006
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        """Store all hyper-parameters on the instance; extra kwargs go to the base class."""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
# (removed: dataset-export artifact)
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Compute per-process waiting times under preemptive SJF (shortest remaining time first).

    Args:
        arrival_time: arrival time of each process.
        burst_time: CPU burst of each process.
        no_of_processes: number of processes.

    Returns:
        Waiting time of each process (clamped at 0).
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999  # sentinel: "no candidate selected yet"
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        # Pick the arrived, unfinished process with the shortest remaining time.
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            # Nothing has arrived yet: just advance the clock.
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Waiting = finish - arrival - burst, clamped at zero.
            waiting_time[short] = finish_time - arrival_time[short] - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Return per-process turnaround times (burst time + waiting time)."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the mean waiting time and mean turnaround time over all processes."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''')
    print('Average turn around time =', total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    # Interactive driver: read process parameters, run preemptive SJF, and
    # print per-process and average statistics as a DataFrame.
    print("""Enter how many process you want to analyze""")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            """Process""",
            """BurstTime""",
            """ArrivalTime""",
            """WaitingTime""",
            """TurnAroundTime""",
        ],
    )
    # Printing the dataFrame
    pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
    print(fcfs)
| 70 | def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
A_ : int = len(__lowercase )
A_ : List[Any] = sum(__lowercase )
A_ : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 ,n + 1 ):
A_ : Optional[Any] = True
for i in range(1 ,s + 1 ):
A_ : Tuple = False
for i in range(1 ,n + 1 ):
for j in range(1 ,s + 1 ):
A_ : Dict = dp[i][j - 1]
if arr[i - 1] <= j:
A_ : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) ,-1 ,-1 ):
if dp[n][j] is True:
A_ : List[Any] = s - 2 * j
break
return diff
# (removed: dataset-export artifact)
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece word-boundary marker used when detokenizing.
SPIECE_UNDERLINE = '''▁'''

# Names of the files that make up a saved BARTpho vocabulary.
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}

# Checkpoint name -> hosted vocabulary file URLs.
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
    },
    '''monolingual_vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
    },
}

# Maximum sequence length accepted by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''vinai/bartpho-syllable''': 1_0_2_4}
class _snake_case( PreTrainedTokenizer ):
    """
    BARTpho tokenizer (syllable level). Wraps a SentencePiece model plus a
    reduced ("monolingual") vocabulary file that maps tokens to fairseq ids.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word: it absorbs the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        """Drop the unpicklable SentencePiece object; keep its serialized proto."""
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 1/0 mask marking special tokens in a built sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """BARTpho does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        """Size of the reduced (fairseq) vocabulary."""
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token to its reduced-vocab id, falling back to unk."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Join sentencepiece pieces, turning the '▁' marker back into spaces."""
        out_string = ''.join(tokens).replace('▁', ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the sentencepiece model and monolingual dict into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk: serialize the in-memory sentencepiece model instead.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, 'w', encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"""{str(token)} \n""")
        return out_vocab_file, out_monolingual_vocab_file
# (removed: dataset-export artifact)
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

# Library version string.
lowerCAmelCase_ = '''2.13.1'''

import platform

import pyarrow
from packaging import version

# Hard requirements: Python >= 3.7 and pyarrow >= 8.0.0; fail fast at import time.
if version.parse(platform.python_version()) < version.parse('''3.7'''):
    raise ImportWarning(
        '''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
    )
if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        '''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
        '''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
    )

# Keep the module namespace clean after the checks.
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
# Re-export a few names onto the deprecated modules for backward compatibility.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
# (removed: dataset-export artifact)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Submodule -> public names; consumed by _LazyModule below so heavy imports are
# deferred until first attribute access.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# (removed: dataset-export artifact)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL (pretrained config archive map for BigBird).
# NOTE(review): `__snake_case` is rebound here, clobbering the logger above —
# presumably distinct names upstream; verify.
__snake_case = {
    """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
    """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
    """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class _lowerCAmelCase ( snake_case_ ):
    """BigBird model configuration: stores the architecture hyper-parameters."""

    # Model type identifier used by the Auto* factories.
    __UpperCAmelCase : Optional[Any] = '''big_bird'''

    def __init__(
        self,
        vocab_size=5_0358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ) -> Tuple:
        """Store all hyper-parameters on the instance; token ids go to the base class."""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class _lowerCAmelCase ( snake_case_ ):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        """Return an input-name -> dynamic-axes mapping for ONNX export."""
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
# (removed: dataset-export artifact)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL (pretrained config archive map for Open-Llama).
# NOTE(review): `a_` is rebound here, clobbering the logger above — presumably
# distinct names upstream; verify.
a_ = {
    's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( A_ ):
    """Open-Llama model configuration: stores the architecture hyper-parameters."""

    # Model type identifier used by the Auto* factories.
    _UpperCAmelCase = '''open-llama'''

    def __init__(
        self,
        vocab_size=10_0000,
        hidden_size=4096,
        intermediate_size=1_1008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ) -> Optional[int]:
        """Store all hyper-parameters on the instance; token ids go to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled kwarg key is kept for backward compatibility with old configs.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self) -> List[Any]:
        """Validate the optional `rope_scaling` dict: {'type': ..., 'factor': ...}."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}""")
# (removed: dataset-export artifact)
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class lowerCAmelCase__ ( HashTable ):
    """Hash table variant whose buckets are deques (separate chaining)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Prepend *data* to the deque at *key*, creating the deque on first use."""
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """Load metric: average remaining capacity per slot, scaled by charge_factor."""
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """Keep chaining into *key* until its deque is full and no slot is empty."""
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
# (removed: dataset-export artifact)
'''
Output heads that map projected network parameters to `torch.distributions`
objects, plus an affine wrapper used to de-standardize scaled targets.
'''
from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    """Distribution of ``loc + scale * X`` for a base distribution of ``X``."""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean shifts by `loc` and scales by `scale`."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance scales with the square of `scale`."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    """Project hidden features to one tensor per distribution argument."""

    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor):
        params_unbounded = [proj(x) for proj in self.proj]
        # domain_map constrains each raw parameter to its valid domain.
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    """Wrap a plain callable as an nn.Module."""

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    """Base class: turn projected arguments into a concrete torch distribution."""

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1):
        self.dim = dim
        # Each argument needs `dim` copies when the event is multivariate.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Build the (optionally affinely de-standardized) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value lying in the distribution's support, usable for padding."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Projection layer mapping hidden features to this head's arguments."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth positive mapping: (x + sqrt(x^2 + 4)) / 2."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    """Student's t distribution output head."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    """Normal distribution output head."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    """Negative binomial distribution output head."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
# (removed: dataset-export artifact)
'''simple docstring'''
import sys
import turtle
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth) -> None:
    """Recursively draw a Sierpinski triangle with the module-level turtle pen.

    Draws the outline of the triangle (vertex1 -> vertex2 -> vertex3 ->
    vertex1), then recurses into the three corner sub-triangles until
    `depth` reaches 0. Relies on the global `my_pen` created in the
    `__main__` guard.

    NOTE(review): the mangled source collapsed all three vertex parameters
    to one name; the midpoint pairings below follow the standard Sierpinski
    construction — confirm against the original rendering.
    """
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
    # Exactly one CLI argument (the recursion depth) is required.
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    # The mangled source assigned the pen and vertices to `_A` while the rest
    # of the script reads `my_pen` and `vertices`; the names below are
    # grounded by those usages.
    my_pen = turtle.Turtle()
    my_pen.ht()  # hide the turtle cursor
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
# Make the shared test utilities importable before loading the dynamic test module.
sys.path.append(str(Path(__file__).parent.parent / "utils"))
# Imported after the sys.path tweak above, hence the noqa.
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
# Directory containing the local feature-extractor fixture configs used below.
lowercase = get_tests_dir("fixtures")
class UpperCamelCase_ ( unittest.TestCase ):
    """Feature-extractor loading behaviour: offline cache use and legacy URL loads.

    NOTE(review): reconstructed from mangled source in which every local was
    renamed to `snake_case_` (each assignment clobbering the mock) and both
    methods shared one name; the mock attribute targets below follow the
    standard "server down" mock pattern.
    """

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = WavaVecaFeatureExtractor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
    """Staging-Hub integration tests for pushing feature extractors.

    NOTE(review): reconstructed from mangled source where all methods shared
    one name. The unittest hooks are identified by behaviour (token setup ->
    setUpClass, repo cleanup -> tearDownClass); assertion arguments are
    grounded by the surviving `k, v` loop variables and
    `new_feature_extractor` reference.
    """

    @classmethod
    def setUpClass(cls):
        # Authenticate once for all tests in this class.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of any repos the tests created.
        try:
            delete_repo(token=cls._token , repo_id='test-feature-extractor' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
        except HTTPError:
            pass

    def test_push_to_hub(self):
        # `lowercase` is the fixtures directory defined at module level.
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(lowercase)
        feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='test-feature-extractor' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='test-feature-extractor' , push_to_hub=True , use_auth_token=self._token )

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )

    def test_push_to_hub_in_organization(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(lowercase)
        feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=True , use_auth_token=self._token )

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(lowercase)

        feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 198 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Checkpoint ids compared by the pipeline below. The mangled source bound all
# four to one name (`lowercase`), so only the last survived.
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class UpperCamelCase_ ( DiffusionPipeline ):
    """Run one prompt through Stable Diffusion v1.1-v1.4 and return all four images.

    NOTE(review): reconstructed from mangled source — every parameter was
    collapsed to `a` (a SyntaxError) and all four per-checkpoint methods
    shared one name. Parameter names follow the keyword arguments that
    survived in the bodies (e.g. the `StableDiffusionPipeline(...)` call in
    `__init__` and the `prompt=`/`height=`/... keywords below).
    """

    def __init__(
        self,
        vae,
        text_encoder,
        tokenizer,
        unet,
        scheduler,
        safety_checker,
        feature_extractor,
        requires_safety_checker=True,
    ):
        # Was `super()._init_()`: DiffusionPipeline has no `_init_` method.
        super().__init__()

        # v1.1-v1.3 are pulled from the Hub; v1.4 is assembled from the supplied modules.
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(
            pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4
        )

    @property
    def layers(self) -> Dict[str, Any]:
        """All registered components, keyed by their config name."""
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith('_')}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto") -> None:
        """Enable sliced attention computation to reduce peak memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self) -> None:
        """Disable sliced attention computation."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the Stable Diffusion v1.1 checkpoint."""
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the Stable Diffusion v1.2 checkpoint."""
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the Stable Diffusion v1.3 checkpoint."""
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the Stable Diffusion v1.4 checkpoint."""
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Run the prompt through all four checkpoints; collect the first image of each."""
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
| 198 | 1 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def lowercase ( UpperCamelCase : list , UpperCamelCase : list , UpperCamelCase : list , UpperCamelCase : list , UpperCamelCase : list ):
"""simple docstring"""
A__ : Tuple =np.array([[1, item, train_mtch[i]] for i, item in enumerate(UpperCamelCase )] )
A__ : Optional[int] =np.array(UpperCamelCase )
A__ : Optional[int] =np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , UpperCamelCase ) ) , x.transpose() ) , UpperCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: SARIMAX time-series forecast of the user count.

    Fits a SARIMAX(1,2,1)x(1,1,0,7) model on `train_user` with `train_match`
    as an exogenous regressor and predicts one step for `test_match`.

    NOTE(review): reconstructed from mangled source — the parameters shared
    one name; `disp=False` restores the conventional quiet fit (the mangled
    line passed an argument as `disp`). Confirm against the original script.
    """
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: RBF-kernel support vector regression.

    Parameter names follow the call in the `__main__` guard
    (`support_vector_regressor(x_train, x_test, trn_user)`); the mangled
    source collapsed them to one name, which is a SyntaxError.
    """
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit based on the interquartile range.

    low_lim = Q1 - 0.1 * IQR. The parameter name is grounded by the body,
    which already used `train_user` (the mangled signature said
    `UpperCamelCase`, producing a NameError).

    >>> abs(interquartile_range_checker([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) - 2.8) < 1e-9
    True
    """
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Vote on whether today's data looks safe.

    A vote within 0.1 (in absolute value) of the actual result counts as
    safe; a farther-off vote counts as unsafe. Returns True when safe votes
    outnumber unsafe ones. The function name is grounded by the call in the
    `__main__` guard.
    """
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            # NOTE(review): reconstructed from a mangled plain assignment
            # (`<var> = not_safe + 1`); this mirrors the source algorithm's
            # handling of overshooting votes — confirm the intent.
            safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # Every module variable in the mangled source was named `__A`; the names
    # below are grounded by how each value is used afterwards
    # (e.g. `pd.DataFrame(data_input, ...)`, `normalize_df[:, 2]`, ...).
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    # NOTE(review): `tst_user` is a one-element list, not a float — kept from
    # the source; confirm data_safety_checker's expected argument type.
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    # Was a plain string; the `f` prefix is required for {not_str} to interpolate.
    print(f"Today's data is {not_str}safe.")
| 595 | """simple docstring"""
# Value/symbol pairs in descending order, including the subtractive forms,
# so a single greedy pass converts an integer to Roman numerals.
ROMAN = [
    (1_000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer.

    Handles subtractive notation by looking one symbol ahead. The parameter
    name is grounded by the body, which indexed `roman` while the mangled
    signature said `UpperCamelCase` (a NameError).

    >>> roman_to_int("XIV")
    14
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            # Subtractive pair, e.g. IV or XC.
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert a positive integer to a Roman numeral string.

    Greedy over ROMAN; the mangled source computed `divmod(number, number)`,
    which destroyed the quotient/remainder split.

    >>> int_to_roman(14)
    'XIV'
    """
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 595 | 1 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Return True when `number` is prime (trial division by 6k +/- 1).

    The name is grounded by the `is_prime(...)` call inside `solution`; the
    mangled source referenced an undefined `_lowerCAmelCase` instead of the
    parameter.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Project Euler 58: smallest spiral side length whose diagonal prime
    ratio drops below `ratio`.

    Walks the square spiral layer by layer (side length j, j+2, ...),
    counting primes on the three non-square diagonal corners of each layer.
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 118 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase):
    """Tokenizer tests for DeBERTa (slow and fast implementations).

    NOTE(review): reconstructed from mangled source — the base class was an
    undefined `__a` (the `TokenizerTesterMixin` import above identifies it),
    all class attributes shared one name, and every method was called
    `__snake_case`, so unittest would have discovered nothing. Attribute
    names are grounded by `self.tokenizer_class`, `self.test_rust_tokenizer`
    and `self.rust_tokenizer_class` usages inside the bodies.
    """

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        """Write a tiny BPE vocab/merges fixture into the temp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Build a slow tokenizer from the fixture written in setUp."""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Return an (input text, expected decoded text) pair for round-trips."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                'input_ids': [
                    [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
                ],
                'token_type_ids': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                'attention_mask': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 238 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    """Nightly integration tests for OnnxStableDiffusionInpaintPipelineLegacy.

    NOTE(review): reconstructed from mangled source — both properties shared
    one name and shadowed each other; the names `gpu_provider`/`gpu_options`
    are grounded by `self.gpu_provider`/`self.gpu_options` in the test body.
    The trailing dataset residue fused onto the last line (a syntax error)
    was removed.
    """

    @property
    def gpu_provider(self):
        # ONNX Runtime execution provider pinned to CUDA with a capped arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the mangled line assigned a bare local to False;
        # disabling the memory-pattern optimization is the conventional
        # target here — confirm.
        options.enable_mem_pattern = False
        return options

    def test_stable_diffusion_inpaint_legacy(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None)

        prompt = '''A red cat sitting on a park bench'''

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_5 , generator=generator , output_type='''np''' , )
        image = output.images[0]

        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# In the mangled source every constant below was bound to one name
# (`_UpperCAmelCase`), each assignment clobbering the previous one, while the
# tokenizer class references VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, XLNetTokenizer and logger — those
# usages ground the names restored here.
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece installed.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class snake_case__( PreTrainedTokenizerFast ):
    """Fast XLNet tokenizer backed by HuggingFace *tokenizers*.

    NOTE(review): reconstructed from mangled source — the `__init__`
    parameters were all named `__lowercase` (a SyntaxError) and the three
    public methods shared one name, shadowing each other. Parameter names
    are grounded by the keyword arguments of the `super().__init__` call;
    `save_directory`/`filename_prefix` are grounded by the f-string and the
    prefix usage in `save_vocabulary`. The dataset residue fused onto the
    final line (a syntax error) was removed.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )

        # XLNet uses segment id 3 for padding.
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-saved when the sentencepiece model is present.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1 = None) -> List[int]:
        """Add XLNet special tokens: `X <sep> <cls>` or `A <sep> B <sep> <cls>`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None) -> List[int]:
        """Segment ids: 0 for sequence A (+sep), 1 for sequence B (+sep), 2 for <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )

        return (out_vocab_file,)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase : List[Any] = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 543 | """simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_A = logging.get_logger(__name__)
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
    """Automatic-mask-generation chunk pipeline (SAM-style).

    Works in three stages: `preprocess` crops the image and yields batches of
    point prompts, the forward step predicts/filters masks per batch, and
    `postprocess` merges and NMS-deduplicates all masks.

    NOTE(review): identifiers in this fragment are machine-mangled — several
    methods repeat the parameter name `_snake_case` (a SyntaxError) and assign
    every local to `SCREAMING_SNAKE_CASE__` while later lines read the original
    names (`kwargs`, `grid_points`, `masks`, ...). Recover the real names from
    the upstream `MaskGenerationPipeline` before relying on this code.
    """
    def __init__( self : Tuple , **_snake_case : Dict ) -> List[str]:
        super().__init__(**_snake_case )
        # This pipeline needs both image handling and a torch backend.
        requires_backends(self , "vision" )
        requires_backends(self , "torch" )
        if self.framework != "pt":
            raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
        self.check_model_type(_snake_case )
    def lowerCAmelCase_ ( self : int , **_snake_case : List[Any] ) -> Union[str, Any]:
        # Split user kwargs into (preprocess, forward, postprocess) parameter dicts.
        # NOTE(review): every assignment below overwrites the same mangled local
        # and the body reads `kwargs` instead of the `**_snake_case` parameter.
        SCREAMING_SNAKE_CASE__ = {}
        SCREAMING_SNAKE_CASE__ = {}
        SCREAMING_SNAKE_CASE__ = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            SCREAMING_SNAKE_CASE__ = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self : int , _snake_case : List[str] , *_snake_case : int , _snake_case : Union[str, Any]=None , _snake_case : List[str]=None , **_snake_case : Tuple ) -> str:
        # Delegates to ChunkPipeline.__call__ which drives preprocess/forward/postprocess.
        return super().__call__(_snake_case , *_snake_case , num_workers=_snake_case , batch_size=_snake_case , **_snake_case )
    def lowerCAmelCase_ ( self : List[Any] , _snake_case : int , _snake_case : List[Any]=64 , _snake_case : int = 0 , _snake_case : float = 512 / 1500 , _snake_case : Optional[int] = 32 , _snake_case : Optional[int] = 1 , ) -> int:
        # Generator: loads the image, builds crop boxes + point grids, computes
        # image embeddings once, then yields prompt batches of `points_per_batch`.
        SCREAMING_SNAKE_CASE__ = load_image(_snake_case )
        SCREAMING_SNAKE_CASE__ = self.image_processor.size["longest_edge"]
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor.generate_crop_boxes(
            _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
        SCREAMING_SNAKE_CASE__ = self.image_processor(images=_snake_case , return_tensors="pt" )
        with self.device_placement():
            if self.framework == "pt":
                SCREAMING_SNAKE_CASE__ = self.get_inference_context()
                with inference_context():
                    # Embed the image once; reused for every point batch below.
                    SCREAMING_SNAKE_CASE__ = self._ensure_tensor_on_device(_snake_case , device=self.device )
                    SCREAMING_SNAKE_CASE__ = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
                    SCREAMING_SNAKE_CASE__ = image_embeddings
        SCREAMING_SNAKE_CASE__ = grid_points.shape[1]
        SCREAMING_SNAKE_CASE__ = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None" )
        for i in range(0 , _snake_case , _snake_case ):
            SCREAMING_SNAKE_CASE__ = grid_points[:, i : i + points_per_batch, :, :]
            SCREAMING_SNAKE_CASE__ = input_labels[:, i : i + points_per_batch]
            # is_last flags the final batch so postprocess knows when to aggregate.
            SCREAMING_SNAKE_CASE__ = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def lowerCAmelCase_ ( self : Tuple , _snake_case : Optional[int] , _snake_case : Dict=0.88 , _snake_case : List[Any]=0.95 , _snake_case : List[Any]=0 , _snake_case : List[str]=1 , ) -> Optional[int]:
        # Forward pass for one prompt batch, with mask post-processing done here
        # to avoid copying ALL masks between CPU and GPU.
        SCREAMING_SNAKE_CASE__ = model_inputs.pop("input_boxes" )
        SCREAMING_SNAKE_CASE__ = model_inputs.pop("is_last" )
        SCREAMING_SNAKE_CASE__ = model_inputs.pop("original_sizes" ).tolist()
        SCREAMING_SNAKE_CASE__ = model_inputs.pop("reshaped_input_sizes" ).tolist()
        SCREAMING_SNAKE_CASE__ = self.model(**_snake_case )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        SCREAMING_SNAKE_CASE__ = model_outputs["pred_masks"]
        SCREAMING_SNAKE_CASE__ = self.image_processor.post_process_masks(
            _snake_case , _snake_case , _snake_case , _snake_case , binarize=_snake_case )
        SCREAMING_SNAKE_CASE__ = model_outputs["iou_scores"]
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _snake_case , _snake_case , _snake_case , _snake_case , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def lowerCAmelCase_ ( self : List[str] , _snake_case : Union[str, Any] , _snake_case : Any=False , _snake_case : str=False , _snake_case : List[str]=0.7 , ) -> List[Any]:
        # Aggregate all per-batch outputs, run cross-batch NMS, and optionally
        # attach RLE masks / bounding boxes to the result dict.
        SCREAMING_SNAKE_CASE__ = []
        SCREAMING_SNAKE_CASE__ = []
        SCREAMING_SNAKE_CASE__ = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores" ) )
            all_masks.extend(model_output.pop("masks" ) )
            all_boxes.append(model_output.pop("boxes" ) )
        SCREAMING_SNAKE_CASE__ = torch.cat(_snake_case )
        SCREAMING_SNAKE_CASE__ = torch.cat(_snake_case )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor.post_process_for_mask_generation(
            _snake_case , _snake_case , _snake_case , _snake_case )
        # Collect any remaining per-batch keys into lists under the same key.
        SCREAMING_SNAKE_CASE__ = defaultdict(_snake_case )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(_snake_case )
        SCREAMING_SNAKE_CASE__ = {}
        if output_rle_mask:
            SCREAMING_SNAKE_CASE__ = rle_mask
        if output_bboxes_mask:
            SCREAMING_SNAKE_CASE__ = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 159 | 0 |
def __lowercase( __snake_case : list[int] ) -> int:
    """Return the maximum product over all contiguous subarrays of *__snake_case*.

    Classic Kadane-style scan that tracks both the maximum and minimum product
    ending at each index (a negative element swaps their roles).

    :param __snake_case: list (or tuple) of integers; may be empty.
    :return: maximum subarray product, or 0 for empty input.
    :raises ValueError: if the input is not a list/tuple of integers.

    Fixes applied: the original body read an undefined name ``numbers`` (the
    parameter was mangled) and collapsed all three running locals into one
    name, so it raised NameError on any call.
    """
    numbers = __snake_case
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        number = numbers[i]
        if number < 0:
            # A negative factor turns the largest product into the smallest
            # (and vice versa), so swap the two running extremes.
            min_till_now, max_till_now = max_till_now, min_till_now
        # Either extend the current subarray or restart it at `number`.
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # Track the best product seen so far.
        max_prod = max(max_prod, max_till_now)
    return max_prod
| 708 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCamelCase (lowerCamelCase ):
    """Unconditional latent-diffusion pipeline: DDIM-denoise random latents
    with a UNet, then decode them to images through a VQ-VAE.

    NOTE(review): identifiers are machine-mangled — ``__init__`` and
    ``__call__`` repeat the parameter name ``SCREAMING_SNAKE_CASE_`` (a
    SyntaxError) while bodies read ``latents``, ``eta``, ``output_type``,
    ``return_dict`` etc.; recover the real names from the upstream
    LDMPipeline before relying on this code.
    """
    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        super().__init__()
        # Register vqvae / unet / scheduler so DiffusionPipeline can save & move them.
        self.register_modules(vqvae=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
    @torch.no_grad()
    def __call__( self , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ):
        # Sample the initial latents in the UNet's native resolution.
        __snake_case = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=SCREAMING_SNAKE_CASE_ , )
        __snake_case = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        __snake_case = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        __snake_case = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        __snake_case = {}
        if accepts_eta:
            __snake_case = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            __snake_case = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            # predict the noise residual
            __snake_case = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            __snake_case = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
        # decode the image latents with the VAE
        __snake_case = self.vqvae.decode(SCREAMING_SNAKE_CASE_ ).sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        __snake_case = (image / 2 + 0.5).clamp(0 , 1 )
        __snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            __snake_case = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
| 345 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__a :Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def __snake_case ( __UpperCamelCase : Union[List, PIL.Image.Image, torch.Tensor] ):
"""simple docstring"""
warnings.warn(
"The preprocess method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor.preprocess instead" ,__UpperCamelCase ,)
if isinstance(__UpperCamelCase ,torch.Tensor ):
return image
elif isinstance(__UpperCamelCase ,PIL.Image.Image ):
A_ = [image]
if isinstance(image[0] ,PIL.Image.Image ):
A_ , A_ = image[0].size
A_ , A_ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
A_ = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
A_ = np.concatenate(__UpperCamelCase ,axis=0 )
A_ = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0
A_ = image.transpose(0 ,3 ,1 ,2 )
A_ = 2.0 * image - 1.0
A_ = torch.from_numpy(__UpperCamelCase )
elif isinstance(image[0] ,torch.Tensor ):
A_ = torch.cat(__UpperCamelCase ,dim=0 )
return image
def __snake_case ( __UpperCamelCase : Union[List, PIL.Image.Image, torch.Tensor] ):
"""simple docstring"""
if isinstance(__UpperCamelCase ,torch.Tensor ):
return mask
elif isinstance(__UpperCamelCase ,PIL.Image.Image ):
A_ = [mask]
if isinstance(mask[0] ,PIL.Image.Image ):
A_ , A_ = mask[0].size
A_ , A_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
A_ = [np.array(m.convert("L" ).resize((w, h) ,resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
A_ = np.concatenate(__UpperCamelCase ,axis=0 )
A_ = mask.astype(np.floataa ) / 255.0
A_ = 0
A_ = 1
A_ = torch.from_numpy(__UpperCamelCase )
elif isinstance(mask[0] ,torch.Tensor ):
A_ = torch.cat(__UpperCamelCase ,dim=0 )
return mask
class _a ( snake_case_ ):
    """RePaint-style inpainting pipeline: alternates denoise steps with
    scheduler ``undo_step`` re-noising (jump_length / jump_n_sample) to fill
    the masked region of an image.

    NOTE(review): identifiers are machine-mangled — ``__call__`` repeats the
    parameter name ``UpperCAmelCase`` (a SyntaxError), both class attributes
    share one name, and the body reads ``original_image``/``mask_image``/
    ``eta`` etc. that no longer exist; it also calls ``_preprocess_image`` /
    ``_preprocess_mask`` which are not defined under those names above.
    """
    # NOTE(review): two annotations under one mangled name; presumably
    # `unet: UNet2DModel` and `scheduler: RePaintScheduler`.
    _lowerCamelCase : UNetaDModel
    _lowerCamelCase : RePaintScheduler
    def __init__( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple ):
        super().__init__()
        # Register the unet and scheduler with the DiffusionPipeline machinery.
        self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
    @torch.no_grad()
    def __call__( self : Optional[int] , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] , UpperCAmelCase : int = 250 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : int = 10 , UpperCAmelCase : int = 10 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ):
        A_ = image
        A_ = _preprocess_image(UpperCAmelCase )
        A_ = original_image.to(device=self.device , dtype=self.unet.dtype )
        A_ = _preprocess_mask(UpperCAmelCase )
        A_ = mask_image.to(device=self.device , dtype=self.unet.dtype )
        A_ = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(UpperCAmelCase )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        A_ = original_image.shape
        A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , self.device )
        A_ = eta
        # t_last starts just above the first timestep so the first iteration denoises.
        A_ = self.scheduler.timesteps[0] + 1
        A_ = generator[0] if isinstance(UpperCAmelCase , UpperCAmelCase ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                A_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
                # compute previous image: x_t -> x_t-1
                A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                A_ = self.scheduler.undo_step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
            A_ = t
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        A_ = (image / 2 + 0.5).clamp(0 , 1 )
        A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            A_ = self.numpy_to_pil(UpperCAmelCase )
        if not return_dict:
            return (image,)
        # NOTE(review): the trailing "| 86 |" tokens on the next line are data
        # corruption and must be removed for this file to parse.
        return ImagePipelineOutput(images=UpperCAmelCase ) | 86 |
from __future__ import annotations
# Undirected example graph as an adjacency list: vertex -> list of neighbours.
_a = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class __A :
    """Breadth-first-search helper over an adjacency-list graph.

    NOTE(review): both methods below share the mangled name
    ``__lowerCamelCase`` — the second definition (shortest-path
    reconstruction) silently replaces the first (the BFS itself), and
    locals/attributes are read under names (``graph``, ``queue``,
    ``target_vertex`` ...) that no longer match the mangled assignments.
    The presumed original methods are ``breath_first_search`` and
    ``shortest_path``; confirm before relying on this class.
    """
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
        '''Store the adjacency list, an empty parent map, and the BFS source.'''
        lowerCamelCase__ = graph
        # mapping node to its parent in resulting breadth first tree
        lowerCamelCase__ = {}
        lowerCamelCase__ = source_vertex
    def __lowerCamelCase ( self ):
        '''Run BFS from the source, filling self.parent for every reached vertex.'''
        lowerCamelCase__ = {self.source_vertex}
        # The source has no parent in the BFS tree.
        lowerCamelCase__ = None
        lowerCamelCase__ = [self.source_vertex] # first in first out queue
        while queue:
            lowerCamelCase__ = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(__lowerCAmelCase )
                    lowerCamelCase__ = vertex
                    queue.append(__lowerCAmelCase )
    def __lowerCamelCase ( self , __lowerCAmelCase ):
        '''Rebuild the "source->...->target" path string by walking parents recursively.'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        lowerCamelCase__ = self.parent.get(__lowerCAmelCase )
        if target_vertex_parent is None:
            # Target was never reached by the BFS.
            lowerCamelCase__ = (
                F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            )
            raise ValueError(__lowerCAmelCase )
        return self.shortest_path(__lowerCAmelCase ) + F'->{target_vertex}'
if __name__ == "__main__":
    # Demo: BFS from "G", then print a few reconstructed paths (the last one
    # is expected to raise ValueError for a missing vertex).
    # NOTE(review): `Graph` and `g` are not defined under these names above
    # (the class is mangled to `__A` and the instance is assigned to `_a`),
    # so this block raises NameError as written — confirm against the
    # original script.
    _a = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 481 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __SCREAMING_SNAKE_CASE( a_ ):
    """Configuration for the multilingual-CLIP text encoder (model type "M-CLIP").

    NOTE(review): the constructor repeats the mangled parameter name
    ``UpperCamelCase`` (a SyntaxError) while the body reads
    ``transformerDimSize`` / ``imageDimSize`` — those are the presumed
    original parameter names.
    """
    _UpperCAmelCase = "M-CLIP"
    def __init__( self: Tuple , UpperCamelCase: Any=10_24 , UpperCamelCase: Tuple=7_68 , **UpperCamelCase: Union[str, Any] ) -> List[str]:
        # Hidden size of the XLM-R transformer and target CLIP embedding size.
        snake_case__ = transformerDimSize
        snake_case__ = imageDimSize
        super().__init__(**UpperCamelCase )
class __SCREAMING_SNAKE_CASE( a_ ):
    """XLM-RoBERTa text encoder with a linear projection into the shared
    CLIP embedding space (M-CLIP text tower)."""
    _UpperCAmelCase = MCLIPConfig
    def __init__( self: List[Any] , UpperCamelCase: Union[str, Any] , *UpperCamelCase: int , **UpperCamelCase: int ) -> List[str]:
        super().__init__(UpperCamelCase , *UpperCamelCase , **UpperCamelCase )
        # Text backbone plus a projection from transformer dim to CLIP dim.
        snake_case__ = XLMRobertaModel(UpperCamelCase )
        snake_case__ = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: str , UpperCamelCase: str ) -> Optional[Any]:
        # Mean-pool token embeddings weighted by the attention mask, then project.
        snake_case__ = self.transformer(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )[0]
        snake_case__ = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(UpperCamelCase ), embs
| 372 |
# NOTE(review): both constants are mangled to the same name, so the second
# assignment shadows the first; presumably these are `alphabet_size = 256`
# and `modulus = 1000003` for the Rabin-Karp rolling hash below.
__UpperCamelCase : List[str] = 256
# Modulus to hash a string
__UpperCamelCase : int = 1000003
def a_(pattern: str, text: str) -> bool:
    """Return True iff `pattern` occurs as a contiguous substring of `text`.

    Implements the Rabin-Karp algorithm: compare a rolling polynomial hash of
    each text window against the pattern hash, confirming candidates with a
    direct string comparison to rule out hash collisions.

    :param pattern: substring to search for.
    :param text: string to search in.
    :return: True if found, else False (always False if pattern is longer).

    Fixes applied: the original signature repeated one mangled parameter name
    (a SyntaxError), the body read the undefined names ``pattern``/``text``,
    and the module-level hash constants were shadowed by mangling — they are
    now local to the function.
    """
    # Treat characters as base-256 digits; hash modulo a prime.
    alphabet_size = 256
    modulus = 1000003
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Hash the pattern and the first window of the text; modulus_power ends
    # as alphabet_size**(p_len-1) % modulus, used to drop the leading char.
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Roll the hash: remove text[i], append text[i + p_len].
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def a_ ( ) -> None:
    """Self-test for the Rabin-Karp matcher: a handful of positive/negative
    substring searches, including non-ASCII input.

    NOTE(review): the body calls ``rabin_karp`` and passes ``_A`` — neither
    exists under those names here (the matcher above is mangled to ``a_`` and
    this function reuses the same name, shadowing it), so this test raises
    NameError as written; confirm against the original script.
    """
    snake_case__ = 'abc1abc12'
    snake_case__ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    snake_case__ = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(_A , _A ) and not rabin_karp(_A , _A )
    # Test 2)
    snake_case__ = 'ABABX'
    snake_case__ = 'ABABZABABYABABX'
    assert rabin_karp(_A , _A )
    # Test 3)
    snake_case__ = 'AAAB'
    snake_case__ = 'ABAAAAAB'
    assert rabin_karp(_A , _A )
    # Test 4)
    snake_case__ = 'abcdabcy'
    snake_case__ = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(_A , _A )
    # Test 5)
    snake_case__ = 'Lü'
    snake_case__ = 'Lüsai'
    assert rabin_karp(_A , _A )
    snake_case__ = 'Lue'
    assert not rabin_karp(_A , _A )
    print('Success.' )
if __name__ == "__main__":
    # NOTE(review): `test_rabin_karp` is not defined under that name above.
    test_rabin_karp()
| 372 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test for FlaxXLMRobertaModel: checks the output shape
    and a slice of expected hidden-state values for one fixed sentence."""
    @slow
    def __UpperCAmelCase ( self : int ):
        """Encode a fixed sentence and compare the last hidden state against
        reference values (last embedding dimension only, atol=1e-3)."""
        _snake_case = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
        _snake_case = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
        _snake_case = '''The dog is cute and lives in the garden house'''
        _snake_case = jnp.array([tokenizer.encode(__lowerCamelCase )] )
        _snake_case = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
        # Reference slice computed from the released xlm-roberta-base weights.
        _snake_case = jnp.array(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
        _snake_case = model(__lowerCamelCase )['''last_hidden_state''']
        self.assertEqual(output.shape , __lowerCamelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , __lowerCamelCase , atol=1E-3 ) )
def __UpperCamelCase ( A ):
    """Sort the list *A* in place using circle sort and return it.

    Circle sort repeatedly compares/swaps elements from opposite ends of a
    segment moving inward ("drawing circles"), then recurses on both halves,
    until a full pass performs no swap.

    :param A: list of mutually comparable items; may be empty.
    :return: the same list object, sorted ascending.

    Fixes applied: the original def line carried stray "| 103 |" corruption
    tokens, the short-circuit returned the undefined name ``collection``, and
    the in-place swap targets were mangled away.
    """
    if len(A) < 2:
        return A
    def circle_sort_util(collection, low, high):
        # Sort collection[low:high+1]; return True if any swap happened.
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        # Compare pairs from opposite ends, moving inward.
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        # Odd-length segment: the middle element still needs one comparison.
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap
    # Repeat full passes until one completes with no swaps.
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(A, 0, len(A) - 1)
    return A
if __name__ == "__main__":
    # Interactive demo: read comma-separated integers and print them sorted.
    # NOTE(review): `user_input`, `unsorted` and `circle_sort` are not defined
    # under these names above (identifiers are mangled), so this block raises
    # NameError as written — confirm against the original script.
    __magic_name__ =input('''Enter numbers separated by a comma:\n''').strip()
    __magic_name__ =[int(item) for item in user_input.split(''',''')]
    print(circle_sort(unsorted))
| 415 | 0 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    # Google the command-line arguments with a random User-Agent, save the raw
    # HTML, and open the first five result links in the browser.
    # NOTE(review): every result is assigned to the mangled name `a` while the
    # following lines read `url`, `res`, `soup` and `links` — this block
    # raises NameError as written; confirm against the original script.
    print("Googling.....")
    a = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    a = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file: # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    a = BeautifulSoup(res.text, "html.parser")
    # ".eZt8xd" is the CSS class Google uses for result links.
    a = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(F'''https://google.com{link.get("href")}''')
| 708 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a = list[list[float | int]]
def __magic_name__ ( matrix: list[list[float | int]] , vector: list[list[float | int]] ) -> list[list[float]]:
    """Solve the linear system ``matrix @ x = vector`` by Gaussian elimination.

    Uses partial pivoting (largest absolute value in the column) during
    forward elimination, then back-substitutes and rounds each component to
    10 decimal places to suppress float noise.

    :param matrix: square coefficient matrix as a list of rows.
    :param vector: right-hand side as a column (list of 1-element rows).
    :return: solution as a column (list of 1-element rows of floats).

    Fixes applied: the original signature repeated one mangled parameter name
    (a SyntaxError) and the body read the undefined names ``matrix`` /
    ``vector`` / ``augmented`` etc.
    """
    size = len(matrix)
    # Build the augmented matrix [A | b].
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    # Forward elimination with partial pivoting.
    while row < size and col < size:
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            # Whole column is zero below the current row: skip to next column.
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # Back substitution: clear entries above each pivot.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # Round to get rid of numbers like 2.000000000000004.
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]:
    """Fit a Vandermonde system through the data points (1..n, y) and return
    the fitted polynomial as a callable of one integer.

    NOTE(review): identifiers are machine-mangled — the body assigns every
    local to one name and then reads ``size``, ``coeffs``, ``var`` and calls
    ``solve`` (the Gaussian solver above is not defined under that name), so
    this function raises NameError as written; confirm against the original
    Project Euler 101 script.
    """
    __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
    # Vandermonde coefficient matrix and right-hand-side column.
    __SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
    __SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )]
    __SCREAMING_SNAKE_CASE = 42
    __SCREAMING_SNAKE_CASE = 42
    __SCREAMING_SNAKE_CASE = 42
    __SCREAMING_SNAKE_CASE = 42
    for x_val, y_val in enumerate(__UpperCAmelCase ):
        for col in range(__UpperCAmelCase ):
            # x values are 1-based: point i has abscissa (i + 1).
            __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1)
            __SCREAMING_SNAKE_CASE = y_val
    __SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase )
    def interpolated_func(__UpperCAmelCase ) -> int:
        # Evaluate the fitted polynomial at `var` using the solved coefficients.
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(__UpperCAmelCase ) )
    return interpolated_func
def __magic_name__ ( variable: int ) -> int:
    """Evaluate the Project Euler 101 generating polynomial
    u(n) = 1 - n + n^2 - n^3 + ... + n^10 (alternating signs, degrees 0..10).

    :param variable: the point n at which to evaluate.
    :return: u(n) as an exact integer.

    Fix applied: the original parameter was mangled while the body read the
    name ``variable``, raising NameError on any call; the parameter is
    renamed to match the body's clear intent.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int:
    """Project Euler 101: sum the first incorrect term (FIT) of each optimal
    polynomial fitted to successively longer prefixes of the sequence.

    NOTE(review): identifiers are machine-mangled — defaults reference
    ``question_function`` (not defined under that name), locals collapse to
    one name, and the body calls ``func`` / ``interpolate`` / reads ``order``;
    as written this raises NameError at definition time. Confirm against the
    original Project Euler 101 solution.
    """
    # Sample the true sequence u(1)..u(order).
    __SCREAMING_SNAKE_CASE = [func(__UpperCAmelCase ) for x_val in range(1 , order + 1 )]
    # Fit one polynomial per prefix length 1..order.
    __SCREAMING_SNAKE_CASE = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    __SCREAMING_SNAKE_CASE = 0
    __SCREAMING_SNAKE_CASE = 42
    __SCREAMING_SNAKE_CASE = 42
    for poly in polynomials:
        # Walk forward until the fitted polynomial first disagrees with u(n);
        # that value is the FIT contributed to the answer.
        __SCREAMING_SNAKE_CASE = 1
        while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ):
            x_val += 1
        ret += poly(__UpperCAmelCase )
    return ret
| 13 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
# Optional serving dependencies: when FastAPI/uvicorn are absent, install
# no-op fallbacks so this module still imports (the ServeCommand constructor
# raises a helpful error instead).
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run
    lowerCAmelCase_ : List[Any] = True
except (ImportError, AttributeError):
    lowerCAmelCase_ : List[str] = object
    # NOTE(review): the stub below repeats one mangled parameter name for both
    # *args and **kwargs (a SyntaxError); presumably it stands in for `Body`.
    def _lowerCAmelCase ( *lowerCAmelCase , **lowerCAmelCase ):
        '''No-op stand-in used when the serving extras are not installed.'''
        pass
    lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Optional[Any] = logging.get_logger('''transformers-cli/serving''')
def _lowerCAmelCase ( lowerCAmelCase ):
    '''Factory for the `transformers-cli serve` command: build the requested
    pipeline from the parsed CLI arguments and wrap it in a ServeCommand.

    NOTE(review): the body reads `args` and `A__`, neither of which matches
    the mangled parameter name — as written this raises NameError; presumably
    `args` is the parsed-args parameter and `A__` the constructed pipeline.
    '''
    UpperCAmelCase = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(A__ , args.host , args.port , args.workers )
class UpperCamelCase_ ( _SCREAMING_SNAKE_CASE ):
    # Response model for GET / — presumably exposes the model config as `infos`.
    # NOTE(review): field names/annotations are mangled to `_A : ... = 42`.
    _A : Optional[int] = 42
class UpperCamelCase_ ( _SCREAMING_SNAKE_CASE ):
    # Response model for /tokenize — presumably `tokens` and optional `tokens_ids`.
    # NOTE(review): field names/annotations are mangled to `_A : ... = 42`.
    _A : Optional[Any] = 42
    _A : Union[str, Any] = 42
class UpperCamelCase_ ( _SCREAMING_SNAKE_CASE ):
    # Response model for /detokenize — presumably the decoded `text`.
    # NOTE(review): field name/annotation is mangled to `_A : ... = 42`.
    _A : List[Any] = 42
class UpperCamelCase_ ( _SCREAMING_SNAKE_CASE ):
    # Response model for /forward — presumably the pipeline `output`.
    # NOTE(review): field name/annotation is mangled to `_A : ... = 42`.
    _A : str = 42
class UpperCamelCase_ ( _SCREAMING_SNAKE_CASE ):
    """`transformers-cli serve`: expose a pipeline over a FastAPI/uvicorn REST
    server with /, /tokenize, /detokenize and /forward endpoints.

    NOTE(review): identifiers are machine-mangled — several methods share the
    name ``UpperCamelCase_`` (later definitions overwrite earlier ones),
    locals collapse into one name, and bodies read originals such as
    ``serve_parser``, ``return_ids``, ``tokens_txt``. Recover the real names
    from the upstream `ServeCommand` before relying on this class.
    """
    @staticmethod
    def UpperCamelCase_ ( snake_case__ ) -> Dict:
        """Register the `serve` sub-command and its CLI options on `parser`."""
        UpperCAmelCase = parser.add_parser(
            """serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
        serve_parser.add_argument(
            """--task""" , type=snake_case__ , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
        serve_parser.add_argument("""--host""" , type=snake_case__ , default="""localhost""" , help="""Interface the server will listen on.""" )
        serve_parser.add_argument("""--port""" , type=snake_case__ , default=88_88 , help="""Port the serving will listen to.""" )
        serve_parser.add_argument("""--workers""" , type=snake_case__ , default=1 , help="""Number of http workers""" )
        serve_parser.add_argument("""--model""" , type=snake_case__ , help="""Model's name or path to stored model.""" )
        serve_parser.add_argument("""--config""" , type=snake_case__ , help="""Model's config name or path to stored model.""" )
        serve_parser.add_argument("""--tokenizer""" , type=snake_case__ , help="""Tokenizer name to use.""" )
        serve_parser.add_argument(
            """--device""" , type=snake_case__ , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
        serve_parser.set_defaults(func=snake_case__ )
    def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
        """Store pipeline/host/port/workers and build the FastAPI app with its
        four routes; raises RuntimeError if the serving extras are missing."""
        UpperCAmelCase = pipeline
        UpperCAmelCase = host
        UpperCAmelCase = port
        UpperCAmelCase = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                """Using serve command requires FastAPI and uvicorn. """
                """Please install transformers with [serving]: pip install \"transformers[serving]\"."""
                """Or install FastAPI and uvicorn separately.""" )
        else:
            logger.info(f'''Serving model over {host}:{port}''' )
            UpperCAmelCase = FastAPI(
                routes=[
                    APIRoute(
                        """/""" , self.model_info , response_model=snake_case__ , response_class=snake_case__ , methods=["""GET"""] , ),
                    APIRoute(
                        """/tokenize""" , self.tokenize , response_model=snake_case__ , response_class=snake_case__ , methods=["""POST"""] , ),
                    APIRoute(
                        """/detokenize""" , self.detokenize , response_model=snake_case__ , response_class=snake_case__ , methods=["""POST"""] , ),
                    APIRoute(
                        """/forward""" , self.forward , response_model=snake_case__ , response_class=snake_case__ , methods=["""POST"""] , ),
                ] , timeout=6_00 , )
    def UpperCamelCase_ ( self ) -> Any:
        """Start the uvicorn server for the configured app."""
        run(self._app , host=self.host , port=self.port , workers=self.workers )
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        """GET /: return the loaded model's configuration as a dict."""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def UpperCamelCase_ ( self , snake_case__ = Body(snake_case__ , embed=snake_case__ ) , snake_case__ = Body(snake_case__ , embed=snake_case__ ) ) -> Dict:
        """POST /tokenize: tokenize the input text, optionally returning ids;
        tokenizer failures become HTTP 500."""
        try:
            UpperCAmelCase = self._pipeline.tokenizer.tokenize(snake_case__ )
            if return_ids:
                UpperCAmelCase = self._pipeline.tokenizer.convert_tokens_to_ids(snake_case__ )
                return ServeTokenizeResult(tokens=snake_case__ , tokens_ids=snake_case__ )
            else:
                return ServeTokenizeResult(tokens=snake_case__ )
        except Exception as e:
            raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(snake_case__ )} )
    def UpperCamelCase_ ( self , snake_case__ = Body(snake_case__ , embed=snake_case__ ) , snake_case__ = Body(snake_case__ , embed=snake_case__ ) , snake_case__ = Body(snake_case__ , embed=snake_case__ ) , ) -> Optional[Any]:
        """POST /detokenize: decode token ids back to text; failures become
        HTTP 500."""
        try:
            UpperCAmelCase = self._pipeline.tokenizer.decode(snake_case__ , snake_case__ , snake_case__ )
            return ServeDeTokenizeResult(model="""""" , text=snake_case__ )
        except Exception as e:
            raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(snake_case__ )} )
    async def UpperCamelCase_ ( self , snake_case__=Body(snake_case__ , embed=snake_case__ ) ) -> Any:
        """POST /forward: run the pipeline on the inputs (empty input returns
        an empty result); failures become HTTP 500."""
        if len(snake_case__ ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            UpperCAmelCase = self._pipeline(snake_case__ )
            return ServeForwardResult(output=snake_case__ )
        except Exception as e:
            raise HTTPException(5_00 , {"""error""": str(snake_case__ )} )
| 673 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__A : str = logging.getLogger(__name__)
@dataclass
class __snake_case :
    """Arguments for which model/config/tokenizer to fine-tune from.

    NOTE(review): all field names are mangled to ``lowercase`` (later fields
    overwrite earlier ones) and type annotations were stripped — recover the
    real names (model_name_or_path, config_name, ...) from the upstream
    seq2seq ModelArguments.
    """
    lowercase = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    lowercase = field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    lowercase = field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    lowercase = field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
    lowercase = field(default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Whether tp freeze the encoder.'})
    lowercase = field(default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Whether to freeze the embeddings.'})
@dataclass
class __snake_case :
    """Arguments for the data used in seq2seq training and evaluation.

    NOTE(review): all field names are mangled to ``lowercase`` (later fields
    overwrite earlier ones) and type annotations were stripped — recover the
    real names (data_dir, task, max_source_length, ...) from the upstream
    seq2seq DataTrainingArguments.
    """
    lowercase = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
    lowercase = field(
        default='summarization' ,metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} ,)
    lowercase = field(
        default=10_24 ,metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } ,)
    lowercase = field(
        default=1_28 ,metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } ,)
    lowercase = field(
        default=1_42 ,metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } ,)
    lowercase = field(
        default=1_42 ,metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } ,)
    lowercase = field(default=-1 ,metadata={'help': '# training examples. -1 means use all.'})
    lowercase = field(default=-1 ,metadata={'help': '# validation examples. -1 means use all.'})
    lowercase = field(default=-1 ,metadata={'help': '# test examples. -1 means use all.'})
    lowercase = field(default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Source language id for translation.'})
    lowercase = field(default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Target language id for translation.'})
    lowercase = field(default=_SCREAMING_SNAKE_CASE ,metadata={'help': '# num_beams to use for evaluation.'})
    lowercase = field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} ,)
def handle_metrics(split, metrics, output_dir):
    """Log the metrics for one split and write them to `<output_dir>/<split>_results.json`.

    Args:
        split: "train", "val" or "test" (used in the log banner and file name).
        metrics: dict of metric name -> value.
        output_dir: directory the JSON file is written into (via module-level save_json).

    NOTE: the dump had this def mangled to `UpperCamelCase_`; the name is restored to
    match the call sites (`handle_metrics(...)`) later in this module.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    """Fine-tune / evaluate / predict with a seq2seq model.

    Parses (model, data, training) arguments, builds the config/tokenizer/model and the
    train/val/test datasets, then runs the requested phases and writes metric files.
    Returns the merged metrics dict.

    NOTE: the dump mangled every def in this module to `UpperCamelCase_` (so the
    `main()` call under the __main__ guard dangled) and bound every local to a
    throwaway `lowerCAmelCase_` name; names below are restored from the upstream
    legacy seq2seq fine-tuning script.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fpaa,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        # Only forward a dropout/layerdrop override if training args carry a value for it.
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    """Entry point used by torch_xla's xla_spawn (TPU): each spawned process runs main().

    `index` is the process index supplied by xla_spawn; it is unused here.
    """
    main()
if __name__ == "__main__":
    main()
# flake8: noqa
# Lint as: python3
# NOTE(review): this list mirrors exactly the names imported below, i.e. the module's
# public API; the identifier looks machine-mangled (upstream this is presumably the
# module's export list) — confirm against upstream before renaming.
lowerCAmelCase__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

# Re-exports backing the list above.
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 716 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures")
class __magic_name__ ( unittest.TestCase ):
    """Offline/legacy-loading behavior of Wav2Vec2 feature extraction.

    NOTE(review): local names in this dump are mangled — every assignment target is
    `UpperCAmelCase`, so the mock-response attributes (status code 500, empty headers,
    HTTPError side effect) are overwritten instead of being set on the mock, and the
    object passed as `return_value=lowerCAmelCase__` is undefined here. Restore the
    upstream variable names before relying on these tests.
    """

    def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
        """Cached files should still load when the Hub answers with a server error."""
        # A mock response for an HTTP head request to emulate server down
        UpperCAmelCase = mock.Mock()
        UpperCAmelCase = 5_0_0
        UpperCAmelCase = {}
        UpperCAmelCase = HTTPError
        UpperCAmelCase = {}

        # Download this model to make sure it's in the cache.
        UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
            UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def _UpperCamelCase ( self : List[Any] ) -> Dict:
        """Loading a feature extractor directly from a config URL (deprecated)."""
        # This test is for deprecated behavior and can be removed in v5
        UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
    """Staging-hub round-trip tests for pushing/pulling feature extractors.

    NOTE(review): local names in this dump are mangled — results of `from_pretrained`
    are bound to `UpperCAmelCase` but then referenced as `feature_extractor` /
    `new_feature_extractor` / `lowerCAmelCase__`, so the methods cannot run as
    written; restore the upstream variable names before use.
    """

    @classmethod
    def _UpperCamelCase ( cls : List[str] ) -> List[Any]:
        """setUpClass: authenticate against the staging hub with the test token."""
        UpperCAmelCase = TOKEN
        HfFolder.save_token(lowerCAmelCase__ )

    @classmethod
    def _UpperCamelCase ( cls : Optional[int] ) -> Union[str, Any]:
        """tearDownClass: best-effort deletion of the repos the tests create."""
        try:
            delete_repo(token=cls._token , repo_id="test-feature-extractor" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
        except HTTPError:
            pass

    def _UpperCamelCase ( self : Any ) -> Any:
        """Round-trip into a user namespace via push_to_hub, then save_pretrained(push_to_hub=...)."""
        UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
        feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )

        UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )

        # Reset repo
        delete_repo(token=self._token , repo_id="test-feature-extractor" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                lowerCAmelCase__ , repo_id="test-feature-extractor" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )

        UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )

    def _UpperCamelCase ( self : List[Any] ) -> Tuple:
        """Same round-trip as above, but into an organization namespace."""
        UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
        feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )

        UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )

        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                lowerCAmelCase__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )

        UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )

    def _UpperCamelCase ( self : Dict ) -> List[str]:
        """Push a custom (dynamic-code) feature extractor and reload it with trust_remote_code."""
        CustomFeatureExtractor.register_for_auto_class()
        UpperCAmelCase = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )

        feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )

        UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowerCAmelCase__ )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 1 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of FlaxControlNetModel.

    Attributes:
        down_block_res_samples: residual activations from each down block.
        mid_block_res_sample: residual activation from the mid block.

    (Class/field names restored: the dump had `__lowercase` / `snake_case_ = 42`
    placeholders, while `__call__` below already constructs
    `FlaxControlNetOutput(down_block_res_samples=..., mid_block_res_sample=...)`
    and `BaseOutput` is imported above solely for this base class.)
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    """Embeds the conditioning image into feature space.

    A conv stem followed by pairs of (same-resolution conv, stride-2 downsampling
    conv), ending in a zero-initialized projection so the ControlNet starts as a
    no-op. Class name restored from its use site in FlaxControlNetModel.setup.

    Attributes:
        conditioning_embedding_channels: number of output channels.
        block_out_channels: channel widths of the intermediate conv blocks.
        dtype: computation dtype.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv_same = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv_same)
            # Stride-2 conv halves the spatial resolution between channel stages.
            conv_down = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv_down)
        self.blocks = blocks

        # Zero-initialized so the conditioning contributes nothing at start of training.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """Flax ControlNet: a UNet encoder copy that produces residuals conditioned on an image.

    Names restored from this file's own references: `setup` builds a
    `FlaxControlNetConditioningEmbedding`, `__call__` returns a
    `FlaxControlNetOutput`, and `FlaxModelMixin` / `ConfigMixin` are imported above
    solely to serve as base classes. The original dump's `__call__` had every
    parameter mangled to `A` (a SyntaxError).
    """

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        """Initialize parameters by tracing the module with zero-filled inputs."""
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        # Conditioning image is 8x the latent resolution with 3 (RGB) channels.
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        # Every ControlNet projection conv is zero-initialized (no-op at start).
        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        """Run the ControlNet and return scaled residuals for the UNet.

        Returns a FlaxControlNetOutput (or a plain tuple if return_dict=False).
        """
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process (NCHW -> NHWC for Flax convs)
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
| 65 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class _lowerCAmelCase ( __a ):
    # NOTE(review): both field names and annotations were destroyed by the mangling
    # (`_lowercase =42` twice, so only one binding survives). The imports above
    # (List/Optional/Union, numpy, PIL, BaseOutput) suggest this is the pipeline's
    # output dataclass (images plus an NSFW flag) with BaseOutput as its base —
    # restore from upstream before use; `__a` is undefined in this fragment.
    _lowercase =42
    _lowercase =42

# Only expose the pipeline when its heavy dependencies are importable.
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 290 | 0 |
from typing import Any
def viterbi(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> list:
    """Return the most likely hidden-state sequence for an observation sequence (HMM).

    Standard Viterbi dynamic programming:
    - probabilities[(state, observation)] is the best path probability ending in
      `state` at that observation,
    - pointers[(state, observation)] is that path's predecessor state.

    Raises ValueError (via _validation) on empty or mis-typed inputs.

    NOTE: the dump mangled this def's parameters to five duplicates named
    `lowercase` (a SyntaxError) and every assignment target to `__lowercase`;
    names are restored so the body's own references resolve again.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every viterbi() input: non-empty, lists of str, dicts of the right shape.

    Raises ValueError on the first violation. (Name restored from the call in
    viterbi(); the dump had it mangled to `UpperCAmelCase` with duplicate params.)
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase , ):
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Check both sequence arguments are lists of strings (ValueError otherwise)."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
if not isinstance(_object , lowercase ):
__lowercase = F"{var_name} must be a list"
raise ValueError(lowercase )
else:
for x in _object:
if not isinstance(lowercase , lowercase ):
__lowercase = F"{var_name} must be a list of strings"
raise ValueError(lowercase )
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Check the three probability tables: flat dict of floats, two nested dicts of floats."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Check `_object` is a str->dict mapping whose inner dicts map str->float."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
if not isinstance(_object , lowercase ):
__lowercase = F"{var_name} must be a dict"
raise ValueError(lowercase )
if not all(isinstance(lowercase , lowercase ) for x in _object ):
__lowercase = F"{var_name} all keys must be strings"
raise ValueError(lowercase )
if not all(isinstance(lowercase , lowercase ) for x in _object.values() ):
__lowercase = '''nested dictionary ''' if nested else ''''''
__lowercase = F"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(lowercase )
if __name__ == "__main__":
    from doctest import testmod

    testmod()


# NOTE(review): the dump fused the next fragment's first import onto the testmod
# line; restored here on its own line so later argparse usage keeps working.
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries from `state_dict` in place.

    Missing keys are ignored (pop with a default), so this is safe to call on
    checkpoints that lack some of the entries.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # The dump had `state_dict.pop(lowercase, lowercase)` — both names undefined.
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free nn.Linear that shares the embedding's weight tensor.

    The weight tensor is installed via `.data` replacement, so the returned layer's
    weight has the embedding's (vocab_size, emb_size) shape and shares its storage —
    suitable as a tied lm_head. (Restored: the dump had `bias=lowercase` with
    `lowercase` undefined.)
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M-100 checkpoint and return the converted HF model.

    Steps: load the raw checkpoint, strip fairseq bookkeeping keys, derive the
    config from the saved training args, tie the shared embedding and lm_head.
    (Function name restored from its call at the bottom of the script; locals were
    mangled to discarded `__lowercase` bindings, leaving `args`/`model` undefined.)
    """
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    # Share the decoder embedding as the model's "shared" embedding table.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: fairseq checkpoints lack some HF-only buffers.
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument(
        "pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # Fixed: the dump read `args.fairseq_pathß` (stray ß -> AttributeError) and bound
    # parser/args/model to throwaway `__a` names.
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowercase__ =logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Emits a FutureWarning and defers everything to ChineseCLIPImageProcessor.
    (Class name and base restored from the import above and the warning text; the
    dump had them mangled to `a_` / the undefined `UpperCamelCase__`, and the warn
    category argument was the undefined name `UpperCAmelCase`.)
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 263 |
'''simple docstring'''
import enum
import shutil
import sys
# Current terminal width; used by clear_line()/linebreak() below.
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
# Maps a direction name to the final character of its ANSI cursor-movement escape.
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class Direction(enum.Enum):
    """Vertical cursor movement directions.

    NOTE(review): member names were mangled in the dump (`lowerCamelCase__` twice);
    UP/DOWN restored from the upstream menu helpers — confirm against upstream.
    """

    UP = 0
    DOWN = 1
def forceWrite(content, end=""):
    """Write `content` (stringified) plus `end` to stdout and flush immediately.

    Flushing makes cursor-control escape sequences take effect right away.
    (Restored: the dump named both parameters `A__` while the body used `end`.)
    """
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()
def writeColor(content, color, end=""):
    """Write `content` wrapped in the ANSI SGR escape for `color` (numeric code), then reset.

    (The dump's def had three duplicate `A__` parameters — a SyntaxError; the name
    is taken from the upstream menu helpers, unverified by in-file call sites.)
    """
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
def reset_cursor():
    """Return the cursor to column 0 via a carriage return (name from call sites below)."""
    forceWrite("\r")
def move_cursor(num_lines: int, direction: str):
    """Move the cursor `num_lines` cells in `direction` ("UP"/"DOWN"/"RIGHT"/"LEFT").

    Emits the CSI escape `ESC[<n><letter>` using CURSOR_TO_CHAR for the letter.
    (Name from the upstream menu helpers; the dump's parameters were duplicates.)
    """
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
def clear_line():
    """Blank out the current line with spaces, then return the cursor to column 0."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()
def linebreak():
    """Return to column 0 and draw a full-terminal-width horizontal rule of dashes."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
'''simple docstring'''
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
# We just need a builtin bound at module level so the import-structure tests can see it.
# Fixed: the previous `__a: List[str] = open` raised NameError at import time because
# `List` was never imported (and the annotation was wrong anyway — `open` is a builtin).
__a = open  # noqa: we just need to have a builtin inside this module to test it properly
| 719 | '''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowercase__ , lowercase__ : int = len(UpperCAmelCase ), len(grid[0] )
if (
min(UpperCAmelCase , UpperCAmelCase ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
lowercase__ : Optional[Any] = 0
count += depth_first_search(UpperCAmelCase , row + 1 , UpperCAmelCase , UpperCAmelCase )
count += depth_first_search(UpperCAmelCase , row - 1 , UpperCAmelCase , UpperCAmelCase )
count += depth_first_search(UpperCAmelCase , UpperCAmelCase , col + 1 , UpperCAmelCase )
count += depth_first_search(UpperCAmelCase , UpperCAmelCase , col - 1 , UpperCAmelCase )
visit.remove((row, col) )
return count
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Download a page's `og:image` to the current directory, named by timestamp."""
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed: was `from bsa import BeautifulSoup` (typo — the package is bs4)

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 51 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def SCREAMING_SNAKE_CASE ( tensor_list ) -> bool:
    """Return True when every tensor in *tensor_list* has the same ``.shape``.

    An empty or single-element list is trivially uniform.  The original
    block iterated an undefined name instead of its own parameter, and its
    ``List[Any]`` return annotation was both wrong (the result is a bool)
    and unresolvable (``List`` is not imported in this file).
    """
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )


# Restore the name used at the call site in the scheduler test below.
check_same_shape = SCREAMING_SNAKE_CASE
class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ):
    """Fast (CPU-sized) pipeline tests for ``StableDiffusionLatentUpscalePipeline``.

    Builds a tiny UNet/VAE/CLIP stack so every test runs quickly on CPU.

    NOTE(review): this file looks machine-renamed -- the base-class
    placeholders ``snake_case_``, the parameter placeholders ``_a`` and the
    repeated local name ``snake_case__`` are not defined in this chunk
    (later statements read names such as ``batch_size``/``pipe``/``image``
    that were never bound).  Executable tokens are left byte-identical;
    restore the original identifiers before running.
    """

    # NOTE(review): every class attribute is bound to the same
    # ``__lowercase`` name, so each assignment overwrites the previous one;
    # originally these were distinct attributes (pipeline_class, params,
    # required_optional_params, batch_params, image params, ...).
    __lowercase : Dict = StableDiffusionLatentUpscalePipeline
    __lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'height',
        'width',
        'cross_attention_kwargs',
        'negative_prompt_embeds',
        'prompt_embeds',
    }
    __lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
    __lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __lowercase : int = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __lowercase : List[Any] = frozenset([] )
    __lowercase : Any = True

    @property
    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        # Random dummy latent image of shape (1, 4, 16, 16) with a fixed seed.
        snake_case__ = 1
        snake_case__ = 4
        snake_case__ = (16, 16)

        snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
        return image

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # Assemble the minimal model components the pipeline needs.
        torch.manual_seed(0 )
        snake_case__ = UNetaDConditionModel(
            act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
                '''KDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
            ) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
        snake_case__ = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
            ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' )
        snake_case__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
        snake_case__ = CLIPTextModel(_a )
        snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )

        snake_case__ = {
            '''unet''': model.eval(),
            '''vae''': vae.eval(),
            '''scheduler''': scheduler,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ):
        # Deterministic generator + standard call kwargs for the pipeline.
        if str(_a ).startswith('''mps''' ):
            snake_case__ = torch.manual_seed(_a )
        else:
            snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
        snake_case__ = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': self.dummy_image.cpu(),
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        # End-to-end CPU inference; compares a 3x3 output slice against a
        # pre-recorded reference within 1e-3.
        snake_case__ = '''cpu'''
        snake_case__ = self.get_dummy_components()
        snake_case__ = self.pipeline_class(**_a )
        pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )

        snake_case__ = self.get_dummy_inputs(_a )
        snake_case__ = pipe(**_a ).images
        snake_case__ = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
        snake_case__ = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
        snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_a , 1e-3 )

    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        # Delegates to the mixin with a pipeline-specific tolerance.
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )

    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        super().test_save_load_local(expected_max_difference=3e-3 )

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )

    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        # Run the pipeline under every Karras sigma scheduler and check all
        # outputs share one shape; unsupported schedulers are skipped.
        snake_case__ = [
            '''DDIMScheduler''',
            '''DDPMScheduler''',
            '''PNDMScheduler''',
            '''HeunDiscreteScheduler''',
            '''EulerAncestralDiscreteScheduler''',
            '''KDPM2DiscreteScheduler''',
            '''KDPM2AncestralDiscreteScheduler''',
            '''DPMSolverSDEScheduler''',
        ]
        snake_case__ = self.get_dummy_components()
        snake_case__ = self.pipeline_class(**_a )

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=_a )

        pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )
        snake_case__ = self.get_dummy_inputs(_a )
        snake_case__ = 2

        snake_case__ = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            snake_case__ = getattr(_a , scheduler_enum.name )
            snake_case__ = scheduler_cls.from_config(pipe.scheduler.config )
            snake_case__ = pipe(**_a )[0]
            outputs.append(_a )

        assert check_same_shape(_a )
@require_torch_gpu
@slow
class __magic_name__ (unittest.TestCase ):
    """Slow GPU integration tests for the ``sd-x2-latent-upscaler`` checkpoint.

    NOTE(review): locals are uniformly renamed to ``snake_case__`` and later
    statements read names (``pipe``, ``upscaler``, ``image``,
    ``expected_image``) that were never bound, and call-site placeholders
    ``_a`` are undefined -- machine-mangled obfuscation.  Executable tokens
    are left byte-identical; restore the original identifiers before running.
    """

    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE__ ( self:str ):
        # Generate a latent with SD v1-4, upscale it to 1024x1024, and
        # compare against a stored reference (mean abs diff < 5e-2).
        snake_case__ = torch.manual_seed(33 )

        snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
        pipe.to('''cuda''' )

        snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
        upscaler.to('''cuda''' )

        snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''

        snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images

        snake_case__ = upscaler(
            prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]

        snake_case__ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
        assert np.abs((expected_image - image).mean() ) < 5e-2

    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        # Upscale a stored 512x512 image and compare to its 1024x1024
        # reference (max abs diff < 5e-2).
        snake_case__ = torch.manual_seed(33 )

        snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
        upscaler.to('''cuda''' )

        snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''

        snake_case__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )

        snake_case__ = upscaler(
            prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]

        snake_case__ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
        assert np.abs((expected_image - image).max() ) < 5e-2
| 33 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase__ =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowercase__ =' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for the repo's ``check_copies`` consistency utility.

    Works against a temporary copy of ``scheduling_ddpm.py`` so the real
    tree is never modified.

    NOTE(review): locals (``__a``) and parameters (``snake_case_``) are
    machine-renamed and later statements read names (``comment``,
    ``class_name``, ``fname``...) that were never bound.  Executable tokens
    are left byte-identical; restore the original identifiers before running.
    """

    def lowerCAmelCase (self : Tuple ):
        # setUp: copy scheduling_ddpm.py into a fresh temp "diffusers" tree.
        __a : Tuple = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
        __a : Dict = self.diffusers_dir
        shutil.copy(
            os.path.join(snake_case_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )

    def lowerCAmelCase (self : Any ):
        # tearDown: restore the module-level dir and remove the temp tree.
        __a : List[str] = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir )

    def lowerCAmelCase (self : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : Any=None ):
        # Write a candidate class body, black-format it, and verify that
        # is_copy_consistent accepts it (or rewrites it when overwriting).
        __a : int = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            __a : Union[str, Any] = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        __a : Dict = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
        __a : Dict = black.format_str(snake_case_ , mode=snake_case_ )
        __a : Any = os.path.join(self.diffusers_dir , '''new_code.py''' )
        with open(snake_case_ , '''w''' , newline='''\n''' ) as f:
            f.write(snake_case_ )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(snake_case_ ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=snake_case_ )
            with open(snake_case_ , '''r''' ) as f:
                self.assertTrue(f.read() , snake_case_ )

    def lowerCAmelCase (self : Union[str, Any] ):
        # find_code_in_diffusers should locate the DDPMSchedulerOutput source.
        __a : int = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(snake_case_ , snake_case_ )

    def lowerCAmelCase (self : str ):
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )

        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , snake_case_ , )

        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , snake_case_ ) , )

        # Copy consistency with a really long name
        __a : Any = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" , f"{long_class_name}SchedulerOutput" , re.sub('''Bert''' , snake_case_ , snake_case_ ) , )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , snake_case_ , overwrite_result=re.sub('''DDPM''' , '''Test''' , snake_case_ ) , )
| 715 |
from manim import *
class UpperCamelCase__ ( __lowercase ):
    """Manim scene: animates loading one checkpoint shard into CPU memory.

    Draws CPU / GPU / Model / Loaded-Checkpoint blocks of memory cells, a
    legend, then animates checkpoint cells growing and moving into the CPU
    columns.

    NOTE(review): the base class ``__lowercase``, locals ``__a`` and the
    argument placeholders ``snake_case_`` are machine-renamed and later
    statements read names (``cpu``, ``gpu``, ``model``, ``mem``, ``fill``,
    ``cpu_targs``, ...) never bound in this chunk.  Executable tokens are
    left byte-identical; restore the original identifiers before running.
    """

    def lowerCAmelCase (self : Any ):
        # Base memory-cell rectangles (outer + slightly smaller fill).
        __a : Dict = Rectangle(height=0.5 , width=0.5 )
        __a : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )

        # CPU block: two columns of six cells plus a label.
        __a : List[str] = [mem.copy() for i in range(6 )]
        __a : str = [mem.copy() for i in range(6 )]
        __a : List[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
        __a : List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
        __a : Tuple = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
        __a : Union[str, Any] = Text('''CPU''' , font_size=2_4 )
        __a : Tuple = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(snake_case_ )

        # GPU block: four cells plus a label.
        __a : int = [mem.copy() for i in range(4 )]
        __a : Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
        __a : List[str] = Text('''GPU''' , font_size=2_4 )
        __a : Union[str, Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
        gpu.move_to([-1, -1, 0] )
        self.add(snake_case_ )

        # Model block: six cells plus a label.
        __a : str = [mem.copy() for i in range(6 )]
        __a : Optional[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
        __a : Optional[Any] = Text('''Model''' , font_size=2_4 )
        __a : List[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
        model.move_to([3, -1.0, 0] )
        self.add(snake_case_ )

        # Place a small target marker next to each model cell.
        __a : Dict = []
        for i, rect in enumerate(snake_case_ ):
            rect.set_stroke(snake_case_ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            __a : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=snake_case_ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=snake_case_ , buff=0.0 )
            self.add(snake_case_ )
            cpu_targs.append(snake_case_ )

        # Loaded-checkpoint block: six cells plus a label.
        __a : List[str] = [mem.copy() for i in range(6 )]
        __a : Union[str, Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
        __a : Optional[int] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        __a : str = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , aligned_edge=snake_case_ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )

        # Legend square + key text in the top-left corner.
        __a : int = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )

        __a : str = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )

        self.add(snake_case_ , snake_case_ )

        __a : Dict = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , )
        blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )

        # Caption describing the animation step.
        __a : int = MarkupText(
            f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )

        self.play(Write(snake_case_ ) , Write(snake_case_ ) )

        self.play(Write(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) )

        # Grow each checkpoint cell, then move a copy into the CPU columns.
        __a : int = []
        __a : int = []
        for i, rect in enumerate(snake_case_ ):
            __a : Tuple = fill.copy().set_fill(snake_case_ , opacity=0.7 )
            target.move_to(snake_case_ )
            first_animations.append(GrowFromCenter(snake_case_ , run_time=1 ) )

            __a : Optional[Any] = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )

        self.play(*snake_case_ )
        self.play(*snake_case_ )
        self.wait()
| 326 | 0 |
from ...configuration_utils import PretrainedConfig
_lowerCamelCase = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "tapas"
def __init__( self :Tuple , __A :List[Any]=3_0522 , __A :Dict=768 , __A :List[Any]=12 , __A :int=12 , __A :Any=3072 , __A :str="gelu" , __A :Union[str, Any]=0.1 , __A :Optional[Any]=0.1 , __A :Tuple=1024 , __A :Any=[3, 256, 256, 2, 256, 256, 10] , __A :Optional[Any]=0.0_2 , __A :Tuple=1E-12 , __A :Any=0 , __A :Union[str, Any]=1_0.0 , __A :Any=0 , __A :Tuple=1.0 , __A :Optional[int]=None , __A :Dict=1.0 , __A :Optional[Any]=False , __A :Union[str, Any]=None , __A :List[Any]=1.0 , __A :int=1.0 , __A :Optional[int]=False , __A :Optional[int]=False , __A :Tuple="ratio" , __A :List[Any]=None , __A :Optional[int]=None , __A :Any=64 , __A :Tuple=32 , __A :Any=False , __A :List[str]=True , __A :Optional[Any]=False , __A :str=False , __A :List[str]=True , __A :Any=False , __A :Optional[int]=None , __A :Any=None , **__A :Any , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=__A , **__A )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_sizes
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
# Fine-tuning task hyperparameters
SCREAMING_SNAKE_CASE__ = positive_label_weight
SCREAMING_SNAKE_CASE__ = num_aggregation_labels
SCREAMING_SNAKE_CASE__ = aggregation_loss_weight
SCREAMING_SNAKE_CASE__ = use_answer_as_supervision
SCREAMING_SNAKE_CASE__ = answer_loss_importance
SCREAMING_SNAKE_CASE__ = use_normalized_answer_loss
SCREAMING_SNAKE_CASE__ = huber_loss_delta
SCREAMING_SNAKE_CASE__ = temperature
SCREAMING_SNAKE_CASE__ = aggregation_temperature
SCREAMING_SNAKE_CASE__ = use_gumbel_for_cells
SCREAMING_SNAKE_CASE__ = use_gumbel_for_aggregation
SCREAMING_SNAKE_CASE__ = average_approximation_function
SCREAMING_SNAKE_CASE__ = cell_selection_preference
SCREAMING_SNAKE_CASE__ = answer_loss_cutoff
SCREAMING_SNAKE_CASE__ = max_num_rows
SCREAMING_SNAKE_CASE__ = max_num_columns
SCREAMING_SNAKE_CASE__ = average_logits_per_cell
SCREAMING_SNAKE_CASE__ = select_one_column
SCREAMING_SNAKE_CASE__ = allow_empty_column_selection
SCREAMING_SNAKE_CASE__ = init_cell_selection_weights_to_zero
SCREAMING_SNAKE_CASE__ = reset_position_index_per_cell
SCREAMING_SNAKE_CASE__ = disable_per_token_loss
# Aggregation hyperparameters
SCREAMING_SNAKE_CASE__ = aggregation_labels
SCREAMING_SNAKE_CASE__ = no_aggregation_label_index
if isinstance(self.aggregation_labels , __A ):
SCREAMING_SNAKE_CASE__ = {int(__A ): v for k, v in aggregation_labels.items()} | 6 | '''simple docstring'''
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linearly scan ``array[left:right]`` and return the index of *target*, or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted *array*; return an index or -1.

    Falls back to :func:`lin_search` once the search window is narrower than
    ``precision`` elements.  (The original text of this chunk bound all three
    functions to the same name ``A_`` while the call sites used the real
    names, so nothing here was callable; the names are restored.)
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        # Probe the two cut points that split the window into thirds.
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over ``array[left:right]``; return an index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f'''Iterative search: {target} found at positions: {result_ite}''')
        # The original printed the iterative result twice; report the
        # recursive result on its own line.
        print(f'''Recursive search: {target} found at positions: {result_rec}''')
    else:
        print("""Not found""")
| 451 | 0 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _UpperCAmelCase (checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    """Convert an original (studio-ousia) LUKE checkpoint to Transformers format.

    Loads the raw state dict, adapts it to :class:`LukeModel` (adds the
    ``<ent>``/``<ent2>`` special tokens, initializes the entity-aware
    attention query weights and the ``[MASK2]`` entity embedding),
    sanity-checks the converted model on a reference sentence, and saves
    model + tokenizer to *pytorch_dump_folder_path*.

    The original text of this block bound every local to the single name
    ``_lowerCAmelCase`` while later statements read the real names, so it
    raised ``NameError`` immediately; self-consistent names are restored
    here (matching the call sites), and the inverted entity-shape check is
    fixed (it used ``!=`` and therefore raised exactly when shapes matched).

    Args:
        checkpoint_path: path to the original ``pytorch_model.bin``.
        metadata_path: path to the ``metadata.json`` describing the model.
        entity_vocab_path: path to the ``entity_vocab.tsv`` file.
        pytorch_dump_folder_path: output directory for model + tokenizer.
        model_size: ``"base"`` or ``"large"`` -- selects reference values.

    Raises:
        ValueError: on unexpected/missing state-dict keys or if the
            converted model's outputs do not match the reference values.
    """
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata["""model_config"""] )

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )

    tokenizer = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken("""<ent>""" , lstrip=False , rstrip=False )
    entity_token_b = AddedToken("""<ent2>""" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_b]} )
    config.vocab_size += 2
    print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )

    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
        json.dump(entity_vocab , f )

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )

    # Initialize the embeddings of the special tokens from '@' and '#'.
    word_emb = state_dict["""embeddings.word_embeddings.weight"""]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
    state_dict["""embeddings.word_embeddings.weight"""] = torch.cat([word_emb, ent_emb, enta_emb] )

    # Initialize the query layers of the entity-aware self-attention mechanism
    # (w2e/e2w/e2e variants all start from the plain word-to-word query).
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    entity_emb[entity_vocab["""[MASK2]"""]] = entity_emb[entity_vocab["""[MASK]"""]]

    model = LukeModel(config=config ).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F"Missing keys {', '.join(missing_keys )}. Expected only missing embeddings.position_ids" )
    if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
        raise ValueError(
            """Unexpected keys"""
            F" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" )

    # Check outputs on a reference sentence with one entity span.
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task="""entity_classification""" )

    text = (
        """Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
        """ new world number one avoid a humiliating second- round exit at Wimbledon ."""
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors="""pt""" )

    outputs = model(**encoding )

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )

    # Fixed: the original tested ``!=`` here, raising when shapes matched.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            F" {expected_shape}" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("""Saving PyTorch model to {}""".format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )


# Restore the public name used by the ``__main__`` block below.
convert_luke_checkpoint = _UpperCAmelCase
def _UpperCAmelCase (entity_vocab_path ):
    """Read a TSV entity-vocab file into ``{entity_title: line_index}``.

    Each line is ``<title>\\t<count>``; the running line number becomes the
    entity id and the count column is ignored.  The original text of this
    block enumerated the *path string* (character by character) instead of
    the opened file, and returned an undefined name.
    """
    entity_vocab = {}
    with open(entity_vocab_path , """r""" , encoding="""utf-8""" ) as f:
        # Enumerate the file object so ``index`` is the line number.
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split("""\t""" )
            entity_vocab[title] = index
    return entity_vocab


# Restore the public name used by the conversion routine above.
load_entity_vocab = _UpperCAmelCase
if __name__ == "__main__":
    # CLI entry point for the LUKE checkpoint conversion.
    # NOTE(review): this chunk is machine-renamed -- the parser is bound to
    # ``_lowerCamelCase`` but then used as ``parser``/``args``, and
    # ``convert_luke_checkpoint`` does not appear to be bound in this chunk
    # (the conversion routine above is defined as ``_UpperCAmelCase``).
    # Tokens left byte-identical; verify the names resolve before running.
    _lowerCamelCase : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    _lowerCamelCase : str = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 720 |
from math import log2


def _UpperCAmelCase (number ):
    """Return the 0-based index of the rightmost set bit of *number*.

    ``number & -number`` isolates the lowest set bit; its base-2 logarithm
    is that bit's index.  By convention the result for 0 is 0.

    The original text of this block imported the nonexistent ``math.loga``
    and called ``isinstance(a, a)`` (always a TypeError, since the second
    argument must be a type).

    Raises:
        TypeError: if *number* is not an int.
        ValueError: if *number* is negative.
    """
    # Type check first so non-ints raise TypeError rather than failing the
    # ``< 0`` comparison or being accepted silently.
    if not isinstance(number , int ):
        raise TypeError("""Input value must be a 'int' type""" )
    if number < 0:
        raise ValueError("""Input value must be a positive integer""" )
    return 0 if (number == 0) else int(log2(number & -number ) )


# Descriptive public alias for the obfuscated name above.
get_index_of_rightmost_set_bit = _UpperCAmelCase


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 196 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
    """Launches the bundled accelerate multi-GPU test scripts via ``torchrun``.

    NOTE(review): locals are uniformly bound to ``lowerCAmelCase`` while
    later statements read the real names (``mod_file``,
    ``self.test_file_path``, ``cmd``) and the ``execute_subprocess_async``
    argument placeholder ``a__`` is undefined -- machine-mangled.  Tokens
    left byte-identical; restore the original identifiers before running.
    """

    def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
        # setUp: resolve the paths of the test scripts that ship next to
        # ``accelerate.test_utils``.
        lowerCAmelCase :str = inspect.getfile(accelerate.test_utils )
        lowerCAmelCase :Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        lowerCAmelCase :Union[str, Any] = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
        lowerCAmelCase :Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )

    @require_multi_gpu
    def UpperCAmelCase__ ( self : List[str] ) -> str:
        # Run the generic distributed test script on every visible GPU.
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        lowerCAmelCase :List[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(a__ , env=os.environ.copy() )

    @require_multi_gpu
    def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
        # Run the distributed ops test script.
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        lowerCAmelCase :Optional[int] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(a__ , env=os.environ.copy() )

    @require_multi_gpu
    def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
        # Re-launch this very file under torchrun so its __main__ block runs
        # on every process.
        lowerCAmelCase :Optional[int] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(a__ , env=os.environ.copy() )

    @require_multi_gpu
    def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
        # Run the data-loop test restricted to two devices.
        print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        lowerCAmelCase :Union[str, Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
            execute_subprocess_async(a__ , env=os.environ.copy() )
if __name__ == "__main__":
    # Worker entry point: `test_pad_across_processes` above re-launches this file
    # with torchrun. Each rank builds a tensor whose first dimension depends on
    # its rank, then verifies `pad_across_processes` pads to the common length.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""

    # Default: pad at the END with zeros up to the longest rank's length.
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # `pad_first=True`: the zero padding goes at the FRONT instead.
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex number
    constituted by this x-y-pair diverges. Members of the Mandelbrot set do not
    diverge so their distance is 1. `max_step` must be >= 1.

    >>> get_distance(0, 0, 50)
    1.0
    >>> get_distance(0.5, 0.5, 50)
    0.061224489795918366
    >>> get_distance(2, 0, 50)
    0.0
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """
    Black & white color-coding: pixels inside the Mandelbrot set (distance == 1)
    are black, everything else is white.

    >>> get_black_and_white_rgb(0)
    (255, 255, 255)
    >>> get_black_and_white_rgb(1)
    (0, 0, 0)
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """
    Color-coding that maps the relative distance onto the HSV hue circle;
    the Mandelbrot set itself (distance == 1) stays black.

    >>> get_color_coded_rgb(0)
    (255, 0, 0)
    >>> get_color_coded_rgb(0.5)
    (0, 255, 255)
    >>> get_color_coded_rgb(1)
    (0, 0, 0)
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        # hsv_to_rgb returns floats in [0, 1]; scale to 8-bit channel values
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """
    Render the Mandelbrot set as a PIL image.

    The image maps `image_width` x `image_height` pixels onto the complex-plane
    window centered at (figure_center_x, figure_center_y) with the given
    `figure_width`; the window height is derived from the image aspect ratio.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # the figure-height keeps the aspect ratio of the requested image
    # (loop-invariant, so computed once, not per pixel)
    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the Blenderbot sub-package: maps submodule name to
# the public names it exports. Optional backends (tokenizers / torch / tf /
# flax) only contribute their entries when they are importable.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    """Segformer-specific config checks layered on the common ``ConfigTester``.

    NOTE(review): the mangled original was named ``UpperCamelCase__`` with an
    undefined base ``lowercase__``; it is instantiated below as
    ``SegformerConfigTester(self, config_class=...)``, so those are the names
    restored here (``ConfigTester`` is imported at the top of the file).
    """

    def create_and_test_config_common_properties(self):
        # A Segformer config must expose the hierarchical-encoder knobs.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    """
    Builds a tiny Segformer configuration plus matching dummy inputs and runs
    shape/loss checks on behalf of the test-suite below.

    NOTE(review): restored from a mangled original whose ``__init__`` reused the
    name ``snake_case`` for every parameter (a SyntaxError) and whose methods
    were all named ``A_``; the names here match the call sites in the model test
    class (``prepare_config_and_inputs``, ``create_and_check_model``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one tiny batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build the small Segformer configuration used by every check."""
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the bare encoder and verify the last hidden state's shape."""
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # The final stage downsamples by downsampling_rates[-1] * 2 vs. the input.
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        """Check segmentation logits shape and that a labelled pass yields a positive loss."""
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        """Single-label (binary) segmentation must still produce a positive loss."""
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common ``ModelTesterMixin`` machinery."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Common + pipeline test-suite run against the Segformer model family."""
    # NOTE(review): the bases `lowercase__` are undefined in this file — presumably
    # ModelTesterMixin and PipelineTesterMixin (both imported above); confirm and
    # restore the real names.
    # NOTE(review): every method below is named `A_`, so each definition shadows the
    # previous one and unittest will discover none of them as tests; the upstream
    # names (setUp, test_config, test_model, ...) need to be restored. Likewise the
    # locals are all bound to `UpperCAmelCase`, so later reads of `model`,
    # `inputs_dict`, `attentions`, etc. reference names that are never assigned.
    SCREAMING_SNAKE_CASE__ : Any = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE__ : int = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Any = True
    SCREAMING_SNAKE_CASE__ : List[str] = False
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE__ : int = False
    def A_ ( self ):
        '''Instantiate the model/config testers (upstream name: setUp).'''
        UpperCAmelCase : Optional[Any] = SegformerModelTester(self )
        UpperCAmelCase : Dict = SegformerConfigTester(self , config_class=snake_case )
    def A_ ( self ):
        '''Run the shared config sanity tests (upstream name: test_config).'''
        self.config_tester.run_common_tests()
    def A_ ( self ):
        '''Shape-check the bare model (upstream name: test_model).'''
        UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )
    def A_ ( self ):
        '''Binary (single-label) segmentation smoke test.'''
        UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*snake_case )
    def A_ ( self ):
        '''Multi-label semantic segmentation smoke test.'''
        UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*snake_case )
    @unittest.skip("SegFormer does not use inputs_embeds" )
    def A_ ( self ):
        '''Intentionally skipped: SegFormer takes pixel values, not embeddings.'''
        pass
    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
    def A_ ( self ):
        '''Intentionally skipped: no input/output embedding accessors.'''
        pass
    def A_ ( self ):
        '''Verify the forward signature starts with `pixel_values`.'''
        UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase : List[Any] = model_class(snake_case )
            UpperCAmelCase : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
            UpperCAmelCase : int = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case )
    def A_ ( self ):
        '''Check attention tensors: count and per-block sequence lengths.'''
        UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase : Tuple = True
        for model_class in self.all_model_classes:
            UpperCAmelCase : Tuple = True
            UpperCAmelCase : Optional[int] = False
            UpperCAmelCase : Union[str, Any] = True
            UpperCAmelCase : Dict = model_class(snake_case )
            model.to(snake_case )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase : str = model(**self._prepare_for_class(snake_case , snake_case ) )
            UpperCAmelCase : int = outputs.attentions
            UpperCAmelCase : Union[str, Any] = sum(self.model_tester.depths )
            self.assertEqual(len(snake_case ) , snake_case )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            UpperCAmelCase : Any = True
            UpperCAmelCase : int = model_class(snake_case )
            model.to(snake_case )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
            UpperCAmelCase : Any = outputs.attentions
            self.assertEqual(len(snake_case ) , snake_case )
            # verify the first attentions (first block, first layer)
            UpperCAmelCase : Optional[int] = (self.model_tester.image_size // 4) ** 2
            UpperCAmelCase : Dict = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            UpperCAmelCase : List[str] = (self.model_tester.image_size // 3_2) ** 2
            UpperCAmelCase : int = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            UpperCAmelCase : Any = len(snake_case )
            # Check attention is always last and order is fine
            UpperCAmelCase : Dict = True
            UpperCAmelCase : int = True
            UpperCAmelCase : Optional[Any] = model_class(snake_case )
            model.to(snake_case )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
            self.assertEqual(out_len + 1 , len(snake_case ) )
            UpperCAmelCase : int = outputs.attentions
            self.assertEqual(len(snake_case ) , snake_case )
            # verify the first attentions (first block, first layer)
            UpperCAmelCase : Optional[int] = (self.model_tester.image_size // 4) ** 2
            UpperCAmelCase : str = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def A_ ( self ):
        '''Check hidden-state outputs: one per encoder block, correct first-block shape.'''
        def check_hidden_states_output(snake_case , snake_case , snake_case ):
            UpperCAmelCase : Optional[Any] = model_class(snake_case )
            model.to(snake_case )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase : Dict = model(**self._prepare_for_class(snake_case , snake_case ) )
            UpperCAmelCase : List[str] = outputs.hidden_states
            UpperCAmelCase : Optional[Any] = self.model_tester.num_encoder_blocks
            self.assertEqual(len(snake_case ) , snake_case )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase : Dict = True
            check_hidden_states_output(snake_case , snake_case , snake_case )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase : Union[str, Any] = True
            check_hidden_states_output(snake_case , snake_case , snake_case )
    def A_ ( self ):
        '''Training smoke test: a labelled forward/backward pass must not error.'''
        if not self.model_tester.is_training:
            return
        UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase : Dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(snake_case ):
                continue
            UpperCAmelCase : Optional[int] = model_class(snake_case )
            model.to(snake_case )
            model.train()
            UpperCAmelCase : List[Any] = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
            UpperCAmelCase : Tuple = model(**snake_case ).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def A_ ( self ):
        '''Intentionally skipped pending a smaller common-test model.'''
        pass
    @slow
    def A_ ( self ):
        '''Slow test: loading a published checkpoint must succeed.'''
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase : Any = SegformerModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
def prepare_img():
    """Load the COCO test-fixture image used by the integration tests below.

    NOTE(review): the mangled original was named ``lowercase`` while the
    integration tests call ``prepare_img()``; the caller's name is restored.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration tests that compare SegFormer checkpoints against golden logits."""
    # NOTE(review): all three methods are named `A_`, so only the last definition
    # survives and unittest discovers none of them; the upstream test_* names need
    # to be restored. The locals are likewise collapsed (`UpperCAmelCase`), so later
    # reads of `model`, `outputs`, `expected_shape`, ... are unassigned, and
    # `keep_ratio=snake_case` etc. reference an undefined module-level name
    # (upstream passed literal booleans here) — confirm against the original file.
    @slow
    def A_ ( self ):
        '''ADE20k b0 checkpoint: logits shape and a 3x3x3 slice vs. golden values.'''
        UpperCAmelCase : List[str] = SegformerImageProcessor(
            image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case )
        UpperCAmelCase : Any = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            snake_case )
        UpperCAmelCase : Optional[int] = prepare_img()
        UpperCAmelCase : List[Any] = image_processor(images=snake_case , return_tensors="pt" )
        UpperCAmelCase : List[Any] = encoded_inputs.pixel_values.to(snake_case )
        with torch.no_grad():
            UpperCAmelCase : List[Any] = model(snake_case )
        UpperCAmelCase : int = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
        self.assertEqual(outputs.logits.shape , snake_case )
        UpperCAmelCase : List[str] = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(snake_case )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case , atol=1e-4 ) )
    @slow
    def A_ ( self ):
        '''Cityscapes b1 checkpoint: logits shape and a 3x3x3 slice vs. golden values.'''
        UpperCAmelCase : Tuple = SegformerImageProcessor(
            image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case )
        UpperCAmelCase : Dict = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(snake_case )
        UpperCAmelCase : Any = prepare_img()
        UpperCAmelCase : Tuple = image_processor(images=snake_case , return_tensors="pt" )
        UpperCAmelCase : Optional[Any] = encoded_inputs.pixel_values.to(snake_case )
        with torch.no_grad():
            UpperCAmelCase : int = model(snake_case )
        UpperCAmelCase : List[Any] = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
        self.assertEqual(outputs.logits.shape , snake_case )
        UpperCAmelCase : int = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(snake_case )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case , atol=1e-1 ) )
    @slow
    def A_ ( self ):
        '''Post-processing: semantic maps resized to target sizes (and default size).'''
        UpperCAmelCase : str = SegformerImageProcessor(
            image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case )
        UpperCAmelCase : Union[str, Any] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            snake_case )
        UpperCAmelCase : List[str] = prepare_img()
        UpperCAmelCase : Any = image_processor(images=snake_case , return_tensors="pt" )
        UpperCAmelCase : List[str] = encoded_inputs.pixel_values.to(snake_case )
        with torch.no_grad():
            UpperCAmelCase : str = model(snake_case )
        UpperCAmelCase : int = outputs.logits.detach().cpu()
        UpperCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case , target_sizes=[(5_0_0, 3_0_0)] )
        UpperCAmelCase : Tuple = torch.Size((5_0_0, 3_0_0) )
        self.assertEqual(segmentation[0].shape , snake_case )
        UpperCAmelCase : Tuple = image_processor.post_process_semantic_segmentation(outputs=snake_case )
        UpperCAmelCase : List[str] = torch.Size((1_2_8, 1_2_8) )
        self.assertEqual(segmentation[0].shape , snake_case )
| 609 | 1 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) using the 6k +/- 1 wheel.

    >>> is_prime(0)
    False
    >>> is_prime(2)
    True
    >>> is_prime(13)
    True
    >>> is_prime(15)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Project Euler 58: return the side length of the square spiral at which the
    ratio of primes along both diagonals first falls below ``ratio``.

    >>> solution(0.5)
    11
    """
    j = 3  # current side length of the spiral
    primes = 3  # 3, 5 and 7 — the prime corners of the first layer
    # 2*j - 1 diagonal values exist for side length j (including the center 1)
    while primes / (2 * j - 1) >= ratio:
        # Corners of the next layer; the bottom-right corner is (j+2)**2, a
        # perfect square and never prime, so the range deliberately excludes it.
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
# `torch.onnx.export` deprecated several kwargs in 1.11; `onnx_export` below keys off this flag.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export ``model`` to an ONNX file at ``output_path``.

    NOTE(review): restored from a mangled original whose parameters were all
    named ``a`` (a SyntaxError); the names match the keyword call in
    ``convert_models`` below.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Convert the VAE decoder of a `diffusers` checkpoint to ONNX.

    :param model_path: checkpoint directory or Hub id to load the VAE from
    :param output_path: directory that will receive ``vae_decoder/model.onnx``
    :param opset: ONNX operator-set version to target
    :param fp16: export in float16 (requires a CUDA device)
    :raises ValueError: if ``fp16`` is requested without CUDA available
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    # `--fp16` stores its value on `args.fp16` (the original read a non-existent `args.fpaa`).
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Dict =logging.get_logger(__name__)
class A_ ( __a ):
    """Image processor (MobileViT-style): resize -> center-crop -> rescale -> BGR flip,
    plus post-processing of segmentation logits."""
    # NOTE(review): heavily mangled by obfuscation. Every parameter in each
    # signature is named `snake_case__` (duplicate parameter names are a
    # SyntaxError), every local rebinds one name `lowercase`, and `__init__`
    # never assigns the `self.*` attributes that the other methods read
    # (self.do_resize, self.size, ...). The upstream processor also names the
    # attribute below `model_input_names`, not `_A`. Restore against the
    # original before relying on this class.
    _A :Any = ['''pixel_values''']
    def __init__( self : Dict , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = PILImageResampling.BILINEAR , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_55 , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : bool = True , **snake_case__ : int , ):
        '''Store the default preprocessing configuration (resize/crop/rescale/flip flags).'''
        super().__init__(**snake_case__ )
        lowercase = size if size is not None else {"""shortest_edge""": 2_24}
        lowercase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
        lowercase = crop_size if crop_size is not None else {"""height""": 2_56, """width""": 2_56}
        lowercase = get_size_dict(snake_case__ , param_name="""crop_size""" )
        lowercase = do_resize
        lowercase = size
        lowercase = resample
        lowercase = do_rescale
        lowercase = rescale_factor
        lowercase = do_center_crop
        lowercase = crop_size
        lowercase = do_flip_channel_order
    def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : PILImageResampling = PIL.Image.BILINEAR , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : List[Any] , ):
        '''Resize so the shortest edge matches size["shortest_edge"], keeping aspect ratio.'''
        lowercase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
        lowercase = get_resize_output_image_size(snake_case__ , size=size["""shortest_edge"""] , default_to_square=snake_case__ )
        return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : int , ):
        '''Center-crop to size["height"] x size["width"].'''
        lowercase = get_size_dict(snake_case__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        return center_crop(snake_case__ , size=(size["""height"""], size["""width"""]) , data_format=snake_case__ , **snake_case__ )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : np.ndarray , snake_case__ : Union[int, float] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Dict , ):
        '''Multiply pixel values by the given scale (e.g. 1/255).'''
        return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
    def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : np.ndarray , snake_case__ : Optional[Union[str, ChannelDimension]] = None ):
        '''Swap RGB <-> BGR channel order.'''
        return flip_channel_order(snake_case__ , data_format=snake_case__ )
    def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : ImageInput , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = None , snake_case__ : bool = None , snake_case__ : float = None , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : bool = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : ChannelDimension = ChannelDimension.FIRST , **snake_case__ : Dict , ):
        '''Full preprocessing pipeline; per-call arguments override the instance defaults.'''
        lowercase = do_resize if do_resize is not None else self.do_resize
        lowercase = resample if resample is not None else self.resample
        lowercase = do_rescale if do_rescale is not None else self.do_rescale
        lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowercase = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        lowercase = size if size is not None else self.size
        lowercase = get_size_dict(snake_case__ , default_to_square=snake_case__ )
        lowercase = crop_size if crop_size is not None else self.crop_size
        lowercase = get_size_dict(snake_case__ , param_name="""crop_size""" )
        lowercase = make_list_of_images(snake_case__ )
        if not valid_images(snake_case__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        # All transformations expect numpy arrays.
        lowercase = [to_numpy_array(snake_case__ ) for image in images]
        if do_resize:
            lowercase = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
        if do_center_crop:
            lowercase = [self.center_crop(image=snake_case__ , size=snake_case__ ) for image in images]
        if do_rescale:
            lowercase = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            lowercase = [self.flip_channel_order(image=snake_case__ ) for image in images]
        lowercase = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
        lowercase = {"""pixel_values""": images}
        return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
    def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : str , snake_case__ : List[Tuple] = None ):
        '''Turn segmentation logits into per-image label maps, optionally resized to target_sizes.'''
        lowercase = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(snake_case__ ) != len(snake_case__ ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(snake_case__ ):
                lowercase = target_sizes.numpy()
            lowercase = []
            for idx in range(len(snake_case__ ) ):
                lowercase = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=snake_case__ )
                lowercase = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(snake_case__ )
        else:
            lowercase = logits.argmax(dim=1 )
            lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 72 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
# Script-level logger; the original assigned every constant below to the same
# dummy name while later code referenced the real names, so they are restored.
logger = logging.get_logger("transformers.models.speecht5")

# fairseq -> Hugging Face parameter-name mappings, grouped by sub-module.
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Task-specific combined mappings: speech-to-text, text-to-speech, speech-to-speech.
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
# Checkpoint keys that are never converted, plus task-specific extensions.
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor into the HF model at the dotted path `key`.

    Restores the parameter names the body actually references (the original
    declared five identically named parameters and left `key`, `value`,
    `full_name` and `weight_type` undefined) and restores the `.data`
    assignments that were flattened into dead local variables.

    Args:
        hf_pointer: HF model (or sub-module) to walk into.
        key: dotted attribute path inside `hf_pointer`.
        value: source tensor from the fairseq state dict.
        full_name: original fairseq key, used only for messages.
        weight_type: which leaf tensor to set ("weight", "bias", ...) or None.

    Raises:
        ValueError: when the destination shape does not match `value`.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        # No leaf name given: `hf_pointer` itself is the parameter tensor.
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    """Return True when the checkpoint key `name` matches any ignore pattern.

    Patterns support a trailing "prefix.*" (prefix match) and an inner
    "prefix.*.suffix" wildcard (both fragments must appear in `name`);
    anything else is a plain substring test. The original declared the two
    parameters with dummy names while the body referenced `name` and
    `ignore_keys`, which were undefined.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Copy every weight of a fairseq SpeechT5 state dict into `hf_model`.

    Picks the name mapping and ignore list for the given `task`
    ("s2t", "t2s" or "s2s"), dispatches conv feature-extractor weights to
    `load_conv_layer`, and everything else through `set_recursively`.
    Restores the parameter/local names the original obfuscation destroyed.
    """
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        # Text encoder has no conv feature extractor.
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}" )

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored" )
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the source key and splice it in.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a conv feature-extractor weight/bias into the HF feature extractor.

    `type_id` 0 addresses the conv itself; `type_id` 2 a layer norm (only for
    layer 0 when group norm is used). Shapes are validated before assignment.
    Restores the parameter names the obfuscated original left undefined.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None):
    """Convert a fairseq SpeechT5 checkpoint to the HF format and save it.

    Restores the function name used by the `__main__` call site and the
    locals (`config`, `model`, `tokenizer`, ...) that the obfuscated version
    assigned to a single dummy name.

    Args:
        task: "s2t", "t2s" or "s2s".
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional HF config.json to start from.
        vocab_path: optional SentencePiece model for the tokenizer/processor.
        repo_id: optional Hub repo to push the result to.
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1_876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1_876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}" )

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )

        feature_extractor = SpeechTaFeatureExtractor()
        processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: the original assigned the parser and parsed args to a
    # dummy module constant while calling `parser.add_argument` / `args.*`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--task',
        default='s2t',
        type=str,
        help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
    )
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 72 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class lowerCamelCase_(TaskTemplate):
    """Image-classification task template mapping dataset columns to the task schema.

    Fixes the undefined base class / `frozen=` argument and the four fields
    that were all named `a_` while the methods referenced the real names.
    """

    # Task identifier, kept in asdict output even when left at its default.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel."""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: write through __dict__ to bypass the frozen setattr.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Maps dataset column names to the canonical task column names.
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 459 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
# Location of the documentation table of contents this script validates.
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list):
    """Deduplicate and alphabetically sort one level of a toctree section.

    "Overview" entries are pulled out and kept first; duplicate `local`
    targets are merged (raising if their titles disagree). Restores the
    real name (used by the check_* functions) and the locals the obfuscated
    version collapsed into dummies.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']} )
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
        if len(titles) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower() )

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    """Verify (and optionally rewrite) the Schedulers section of the toctree.

    Restores the real function name (called from `__main__`) and the locals;
    the obfuscated version even opened its boolean parameter as a file.
    """
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def check_pipeline_doc(overwrite=False):
    """Verify (and optionally rewrite) the Pipelines section of the toctree,
    including each pipeline's own sub-sections."""
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    # CLI: run both checks; restores `parser`/`args` names the body relies on.
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 459 | 1 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    """Build a 2-D Gabor filter kernel of size `ksize` x `ksize`.

    Restores the name the `__main__` block calls, the parameter names, and
    the `gabor[y, x] = ...` store that was flattened into a dead local; also
    fixes the nonexistent `np.floataa` dtype to `np.float64`.

    Args:
        ksize: kernel size; made odd by adding 1 when even.
        sigma: Gaussian envelope standard deviation.
        theta: orientation in degrees.
        lambd: sinusoid wavelength.
        gamma: spatial aspect ratio.
        psi: phase offset.
    """
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image (NOTE(review): the file's cv2 import line is mangled
    # as `cva`/`filteraD` — those names come from that import and are kept).
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel_aa)
    # normalize to 0..255 and convert to 8-bit (np.uinta does not exist).
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 445 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    """Builds small DistilBert configs/inputs for the model test suite.

    Restores the class name referenced by the test class's `setUp` and the
    method names the common Flax tester mixin calls.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        # Random token ids plus an optional random attention mask.
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        # Shape expected by the shared Flax model tester mixin.
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Model test suite; fixes the undefined mixin base (`lowercase`) and the
    `all_model_classes` attribute the mixin reads."""

    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke-test that every class loads pretrained weights and runs.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    """Integration test against reference hidden-state values."""

    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        # Reference slice of the last hidden state.
        expected_slice = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 445 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config URL map (both were assigned to the same
# dummy name in the obfuscated original).
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class UpperCamelCase__(PretrainedConfig):
    """Falcon model configuration.

    Fixes the undefined base class, the duplicated class attributes, the
    obfuscated ``__init__`` parameters (the body referenced the real names,
    which were undefined) and the two duplicated property names.
    """

    # Identifier used by the auto-config machinery.
    model_type = "falcon"
    # Output keys generation drops at inference time.
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.0_2,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        eos_token_id=11,
        bos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        # Per-attention-head dimensionality.
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        # Rotary embeddings are used exactly when ALiBi is disabled.
        return not self.alibi
| 104 |
def odd_even_sort(input_list: list) -> list:
    """Sort `input_list` in place using odd-even (brick) sort and return it.

    Restores the function name the `__main__` block calls and the
    `is_sorted`/`input_list` names the body referenced but the obfuscated
    signature never defined.
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    # Read one line of whitespace-separated integers and print them sorted.
    print('Enter list to be sorted')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
| 214 | 0 |
import numpy
# List of input, output pairs used to fit the linear hypothesis. The original
# assigned every constant to one dummy name while the functions below read
# `train_data`, `test_data`, `parameter_vector`, `m` and `LEARNING_RATE`.
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
# Initial parameters: bias first, then one weight per feature.
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Signed error (hypothesis - actual) for one example of the given set.

    The original declared two identically named parameters (a SyntaxError).
    """
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set )
def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis theta0 + sum(theta_i * x_i) on one input.

    Iterates over the weight entries of the module-level `parameter_vector`
    (the obfuscated version looped over the input length, which coincides
    here but is the wrong invariant).
    """
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    # Bias term.
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Actual output value of one example from the train or test set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for one example from the train or test set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum of error terms for the cost derivative w.r.t. parameter `index`.

    `index == -1` denotes the bias term (no feature factor).
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """Average cost derivative over the m training examples."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    """Run batch gradient descent until the parameter vector stops changing.

    Updates the module-level `parameter_vector` in place; the obfuscated
    version lost the `temp_parameter_vector[i]` store and the allclose
    arguments.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.00_0002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            # i - 1 == -1 selects the bias derivative.
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j) )
def test_gradient_descent():
    """Print actual vs. hypothesis output for every test-set example."""
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
# Script entry point: fit the parameters, then compare the learned
# hypothesis against the actual outputs on the test set.
if __name__ == "__main__":
    run_gradient_descent()
    print('''\nTesting gradient descent for a linear hypothesis function.\n''')
    test_gradient_descent()
| 226 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of ``n`` via trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        # Whatever remains after dividing out all small factors is prime.
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of the distinct prime factors of ``num``."""
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    """True iff every element of ``iterable`` is equal (vacuously for empty)."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    """Find the first ``n`` consecutive integers that each have exactly ``n``
    distinct prime factors, and return them as a list (Project Euler 47)."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4):
    """First member of the first run of ``n`` consecutive integers having
    ``n`` distinct prime factors each, or None if the search returns nothing."""
    results = run(n)
    return results[0] if len(results) else None
# Script entry point: print the Project Euler 47 answer for the default n=4.
if __name__ == "__main__":
    print(solution())
| 226 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter (RBJ Audio-EQ cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # b2 == b0 for the low-pass prototype.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter (RBJ Audio-EQ cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # b2 == b0 for the high-pass prototype.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter (RBJ Audio-EQ cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter (RBJ Audio-EQ cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # For an all-pass the a-coefficients are the b-coefficients reversed.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad filter (RBJ Audio-EQ cookbook).

    :param gain_db: boost (+) or cut (-) at the centre frequency, in dB
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad filter (RBJ Audio-EQ cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Intermediate (A±1) ∓ (A∓1)·cos terms shared by the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad filter (RBJ Audio-EQ cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Intermediate (A±1) ∓ (A∓1)·cos terms shared by the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 487 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowerCamelCase :List[Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class UpperCAmelCase(TrainingArguments):
    """TrainingArguments extended with generation-time evaluation options
    (sortish sampling, predict-with-generate, and generation overrides)."""

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize as a plain dict, converting any GenerationConfig value
        (not JSON-serializable itself) into its own dict form."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 487 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """Builds small RegNet configs and dummy inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w): spatial dims are downsampled 32x overall.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common model-tester suite for Flax RegNet."""

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    # RegNet is a vision-only conv net.  NOTE(review): these three flag names
    # follow the FlaxModelTesterMixin conventions -- confirm against the mixin.
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common text-config property checks do not apply to RegNet.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # One hidden state per stage, plus the stem output.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # Jitted and non-jitted runs must agree in structure and shapes.
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    """Load the standard COCO cats fixture image used by the slow tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the pretrained facebook/regnet-y-040."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the pretrained xlm-roberta-base."""

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Mapping from plain-English language name to its NLLB-200 (FLORES-200) code.
# Referenced below as the tool's `lang_to_code` table.
LANGUAGE_CODES = {
    'Acehnese Arabic': 'ace_Arab',
    'Acehnese Latin': 'ace_Latn',
    'Mesopotamian Arabic': 'acm_Arab',
    'Ta\'izzi-Adeni Arabic': 'acq_Arab',
    'Tunisian Arabic': 'aeb_Arab',
    'Afrikaans': 'afr_Latn',
    'South Levantine Arabic': 'ajp_Arab',
    'Akan': 'aka_Latn',
    'Amharic': 'amh_Ethi',
    'North Levantine Arabic': 'apc_Arab',
    'Modern Standard Arabic': 'arb_Arab',
    'Modern Standard Arabic Romanized': 'arb_Latn',
    'Najdi Arabic': 'ars_Arab',
    'Moroccan Arabic': 'ary_Arab',
    'Egyptian Arabic': 'arz_Arab',
    'Assamese': 'asm_Beng',
    'Asturian': 'ast_Latn',
    'Awadhi': 'awa_Deva',
    'Central Aymara': 'ayr_Latn',
    'South Azerbaijani': 'azb_Arab',
    'North Azerbaijani': 'azj_Latn',
    'Bashkir': 'bak_Cyrl',
    'Bambara': 'bam_Latn',
    'Balinese': 'ban_Latn',
    'Belarusian': 'bel_Cyrl',
    'Bemba': 'bem_Latn',
    'Bengali': 'ben_Beng',
    'Bhojpuri': 'bho_Deva',
    'Banjar Arabic': 'bjn_Arab',
    'Banjar Latin': 'bjn_Latn',
    'Standard Tibetan': 'bod_Tibt',
    'Bosnian': 'bos_Latn',
    'Buginese': 'bug_Latn',
    'Bulgarian': 'bul_Cyrl',
    'Catalan': 'cat_Latn',
    'Cebuano': 'ceb_Latn',
    'Czech': 'ces_Latn',
    'Chokwe': 'cjk_Latn',
    'Central Kurdish': 'ckb_Arab',
    'Crimean Tatar': 'crh_Latn',
    'Welsh': 'cym_Latn',
    'Danish': 'dan_Latn',
    'German': 'deu_Latn',
    'Southwestern Dinka': 'dik_Latn',
    'Dyula': 'dyu_Latn',
    'Dzongkha': 'dzo_Tibt',
    'Greek': 'ell_Grek',
    'English': 'eng_Latn',
    'Esperanto': 'epo_Latn',
    'Estonian': 'est_Latn',
    'Basque': 'eus_Latn',
    'Ewe': 'ewe_Latn',
    'Faroese': 'fao_Latn',
    'Fijian': 'fij_Latn',
    'Finnish': 'fin_Latn',
    'Fon': 'fon_Latn',
    'French': 'fra_Latn',
    'Friulian': 'fur_Latn',
    'Nigerian Fulfulde': 'fuv_Latn',
    'Scottish Gaelic': 'gla_Latn',
    'Irish': 'gle_Latn',
    'Galician': 'glg_Latn',
    'Guarani': 'grn_Latn',
    'Gujarati': 'guj_Gujr',
    'Haitian Creole': 'hat_Latn',
    'Hausa': 'hau_Latn',
    'Hebrew': 'heb_Hebr',
    'Hindi': 'hin_Deva',
    'Chhattisgarhi': 'hne_Deva',
    'Croatian': 'hrv_Latn',
    'Hungarian': 'hun_Latn',
    'Armenian': 'hye_Armn',
    'Igbo': 'ibo_Latn',
    'Ilocano': 'ilo_Latn',
    'Indonesian': 'ind_Latn',
    'Icelandic': 'isl_Latn',
    'Italian': 'ita_Latn',
    'Javanese': 'jav_Latn',
    'Japanese': 'jpn_Jpan',
    'Kabyle': 'kab_Latn',
    'Jingpho': 'kac_Latn',
    'Kamba': 'kam_Latn',
    'Kannada': 'kan_Knda',
    'Kashmiri Arabic': 'kas_Arab',
    'Kashmiri Devanagari': 'kas_Deva',
    'Georgian': 'kat_Geor',
    'Central Kanuri Arabic': 'knc_Arab',
    'Central Kanuri Latin': 'knc_Latn',
    'Kazakh': 'kaz_Cyrl',
    'Kabiyè': 'kbp_Latn',
    'Kabuverdianu': 'kea_Latn',
    'Khmer': 'khm_Khmr',
    'Kikuyu': 'kik_Latn',
    'Kinyarwanda': 'kin_Latn',
    'Kyrgyz': 'kir_Cyrl',
    'Kimbundu': 'kmb_Latn',
    'Northern Kurdish': 'kmr_Latn',
    'Kikongo': 'kon_Latn',
    'Korean': 'kor_Hang',
    'Lao': 'lao_Laoo',
    'Ligurian': 'lij_Latn',
    'Limburgish': 'lim_Latn',
    'Lingala': 'lin_Latn',
    'Lithuanian': 'lit_Latn',
    'Lombard': 'lmo_Latn',
    'Latgalian': 'ltg_Latn',
    'Luxembourgish': 'ltz_Latn',
    'Luba-Kasai': 'lua_Latn',
    'Ganda': 'lug_Latn',
    'Luo': 'luo_Latn',
    'Mizo': 'lus_Latn',
    'Standard Latvian': 'lvs_Latn',
    'Magahi': 'mag_Deva',
    'Maithili': 'mai_Deva',
    'Malayalam': 'mal_Mlym',
    'Marathi': 'mar_Deva',
    'Minangkabau Arabic ': 'min_Arab',
    'Minangkabau Latin': 'min_Latn',
    'Macedonian': 'mkd_Cyrl',
    'Plateau Malagasy': 'plt_Latn',
    'Maltese': 'mlt_Latn',
    'Meitei Bengali': 'mni_Beng',
    'Halh Mongolian': 'khk_Cyrl',
    'Mossi': 'mos_Latn',
    'Maori': 'mri_Latn',
    'Burmese': 'mya_Mymr',
    'Dutch': 'nld_Latn',
    'Norwegian Nynorsk': 'nno_Latn',
    'Norwegian Bokmål': 'nob_Latn',
    'Nepali': 'npi_Deva',
    'Northern Sotho': 'nso_Latn',
    'Nuer': 'nus_Latn',
    'Nyanja': 'nya_Latn',
    'Occitan': 'oci_Latn',
    'West Central Oromo': 'gaz_Latn',
    'Odia': 'ory_Orya',
    'Pangasinan': 'pag_Latn',
    'Eastern Panjabi': 'pan_Guru',
    'Papiamento': 'pap_Latn',
    'Western Persian': 'pes_Arab',
    'Polish': 'pol_Latn',
    'Portuguese': 'por_Latn',
    'Dari': 'prs_Arab',
    'Southern Pashto': 'pbt_Arab',
    'Ayacucho Quechua': 'quy_Latn',
    'Romanian': 'ron_Latn',
    'Rundi': 'run_Latn',
    'Russian': 'rus_Cyrl',
    'Sango': 'sag_Latn',
    'Sanskrit': 'san_Deva',
    'Santali': 'sat_Olck',
    'Sicilian': 'scn_Latn',
    'Shan': 'shn_Mymr',
    'Sinhala': 'sin_Sinh',
    'Slovak': 'slk_Latn',
    'Slovenian': 'slv_Latn',
    'Samoan': 'smo_Latn',
    'Shona': 'sna_Latn',
    'Sindhi': 'snd_Arab',
    'Somali': 'som_Latn',
    'Southern Sotho': 'sot_Latn',
    'Spanish': 'spa_Latn',
    'Tosk Albanian': 'als_Latn',
    'Sardinian': 'srd_Latn',
    'Serbian': 'srp_Cyrl',
    'Swati': 'ssw_Latn',
    'Sundanese': 'sun_Latn',
    'Swedish': 'swe_Latn',
    'Swahili': 'swh_Latn',
    'Silesian': 'szl_Latn',
    'Tamil': 'tam_Taml',
    'Tatar': 'tat_Cyrl',
    'Telugu': 'tel_Telu',
    'Tajik': 'tgk_Cyrl',
    'Tagalog': 'tgl_Latn',
    'Thai': 'tha_Thai',
    'Tigrinya': 'tir_Ethi',
    'Tamasheq Latin': 'taq_Latn',
    'Tamasheq Tifinagh': 'taq_Tfng',
    'Tok Pisin': 'tpi_Latn',
    'Tswana': 'tsn_Latn',
    'Tsonga': 'tso_Latn',
    'Turkmen': 'tuk_Latn',
    'Tumbuka': 'tum_Latn',
    'Turkish': 'tur_Latn',
    'Twi': 'twi_Latn',
    'Central Atlas Tamazight': 'tzm_Tfng',
    'Uyghur': 'uig_Arab',
    'Ukrainian': 'ukr_Cyrl',
    'Umbundu': 'umb_Latn',
    'Urdu': 'urd_Arab',
    'Northern Uzbek': 'uzn_Latn',
    'Venetian': 'vec_Latn',
    'Vietnamese': 'vie_Latn',
    'Waray': 'war_Latn',
    'Wolof': 'wol_Latn',
    'Xhosa': 'xho_Latn',
    'Eastern Yiddish': 'ydd_Hebr',
    'Yoruba': 'yor_Latn',
    'Yue Chinese': 'yue_Hant',
    'Chinese Simplified': 'zho_Hans',
    'Chinese Traditional': 'zho_Hant',
    'Standard Malay': 'zsm_Latn',
    'Zulu': 'zul_Latn',
}
class __A(PipelineTool):
    """Tool that translates `text` from `src_lang` to `tgt_lang` with NLLB-200.

    Language arguments are plain-English names, mapped to FLORES-200 codes
    through ``lang_to_code``.
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    # NOTE(review): the import name is mangled upstream ("SeqaSeq" for
    # "Seq2Seq"); kept as-is to match this file's import.
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        """Validate both languages and build tokenized translation inputs."""
        if src_lang not in self.lang_to_code:
            raise ValueError(F'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'{tgt_lang} is not a supported language.')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        """Run generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back to plain text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 96 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

# Jinja template used to render auto-generated model cards.
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuida().hex  # per-process id reported in the telemetry user-agent
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent=None) -> str:
    """Build the HTTP user-agent string, honouring the telemetry opt-outs.

    :param user_agent: extra fields to append, as a dict or a plain string
    """
    ua = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += F'''; torch/{_torch_version}'''
    if is_flax_available():
        ua += F'''; jax/{_jax_version}'''
        ua += F'''; flax/{_flax_version}'''
    if is_onnx_available():
        ua += F'''; onnxruntime/{_onnxruntime_version}'''
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id, organization=None, token=None):
    """Return `namespace/model_id`; the namespace is the token owner's
    username when no organization is given."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return F'''{username}/{model_id}'''
    else:
        return F'''{organization}/{model_id}'''
def create_model_card(args, model_name):
    """Render and save a README.md model card from the training ``args``.

    Requires Jinja; only rank 0 (or single-process runs) writes the card.
    """
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`.")
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file, commit_hash=None):
    """Extract the commit hash from a resolved cache filename, if any.

    Returns ``commit_hash`` unchanged when given, else the hash found in the
    ``snapshots/<hash>/`` path component, else None.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir=None, new_cache_dir=None) -> None:
    """Move blob files from the pre-v0.14 cache layout to the new location,
    leaving symlinks behind so the old cache keeps working."""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                # Best-effort: without the symlink an older diffusers would
                # simply re-download the files.
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.")
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# One-time, import-time migration of the pre-v0.14 cache layout.  The version
# marker file records that the migration already ran.
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
            'existing cached models. This is a one-time operation, you can interrupt it or run it '
            'later by calling `diffusers.utils.hub_utils.move_cache()`.'
        )
        try:
            move_cache()
        except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
            logger.error(
                F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
                'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
                'message and we will do our best to help.'
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, 'w') as f:
            f.write('1')
    except Exception:
        logger.warning(
            F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
            'the directory exists and can be written to.'
        )
def _A ( snake_case , snake_case = None ) -> str:
if variant is not None:
_lowercase : Any = weights_name.split("." )
_lowercase : str = splits[:-1] + [variant] + splits[-1:]
_lowercase : List[str] = ".".join(snake_case )
return weights_name
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
) -> Optional[Any]:
    """Resolve `weights_name` for `pretrained_model_name_or_path` to a local file path.

    Resolution order:
      1. a direct file path,
      2. a local directory (optionally with `subfolder`),
      3. the Hub via `hf_hub_download`, first handling the deprecated
         "variant-as-revision" convention, then the regular download path.

    Raises EnvironmentError with an actionable message for every known failure mode.
    (Original signature was syntactically invalid — all parameters were mangled to
    one name; restored to the canonical keyword-only layout.)
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            # NOTE: `__version__` is the package version, imported at module level in hub_utils.
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                # Best-effort: fall through to the normal download path, but tell the user
                # the variant file is missing from 'main'.
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
| 245 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """Builds tiny RegNet configs/inputs and runs shape checks for the Flax model tests.

    Restored canonical class/method names: the test class below instantiates
    `FlaxRegNetModelTester` and calls `prepare_config_and_inputs*`, which the
    mangled original never defined (all five methods shared one name).
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in hidden_sizes; read by test_hidden_states_output.
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w); RegNet downsamples by a total factor of 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common-mixin test suite for Flax RegNet.

    Restored `test_*` method names: in the mangled original every method was
    named `UpperCAmelCase_`, so only the last survived and unittest discovered
    none of them.
    """

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # RegNet has no common text-config properties to check.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            # embeddings output + one per stage
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # JIT must not change the number or shapes of outputs.
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    """Load the standard COCO cats test image used by the integration test.

    Renamed from the mangled `_a`: the integration test below calls `prepare_img()`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the pretrained facebook/regnet-y-040 checkpoint."""

    @cached_property
    def default_image_processor(self):
        # None when vision deps are missing so the slow test can be skipped gracefully.
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 144 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of canonical checkpoints to their hosted config files.
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    """Configuration for UMT5 models.

    Restored from the mangled original, in which every `self.<attr> = ...`
    assignment had been rewritten into a throwaway local, so the config stored
    nothing, and the base class name was undefined. Defaults mirror the
    google/umt5-small architecture.
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # "gated-gelu" -> gated variant of gelu; plain "relu" -> ungated relu.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for UMT5 (mirrors the T5 ONNX config)."""

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With cached keys/values the attention mask covers past + current tokens
            # and the decoder only receives the newest token.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 144 | 1 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
# Substring renames applied to every CLAP checkpoint key (read by rename_state_dict).
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    """Instantiate the original (laion) CLAP model from `checkpoint_path`.

    Returns `(model, model_cfg)` as produced by `CLAP.create_model`.
    Renamed from the mangled `snake_case`: `convert_clap_checkpoint` calls `init_clap`.
    """
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    """Translate original CLAP checkpoint keys into transformers ClapModel keys.

    Applies the substring renames in KEYS_TO_MODIFY_MAPPING, converts
    `sequential.<i>` / `_projection.<i>` numbering to the transformers layout,
    and splits fused qkv weights into separate query/key/value tensors.
    (Restored names: the mangled original returned `model_state_dict`, which
    was never defined.)
    """
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # NOTE(review): `"audio" and "qkv" in key` evaluates as `("qkv" in key)` because the
        # literal "audio" is always truthy — kept verbatim from upstream (fixing it to
        # `"audio" in key and ...` would change which layers get split); confirm intent upstream.
        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Convert an original CLAP checkpoint and save a transformers ClapModel + config.

    `config_path` is accepted for CLI compatibility but a default ClapConfig is used.
    """
    clap_model, _clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    # presumably this toggles fusion on the audio sub-config, as in the upstream script
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/output paths and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 222 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of canonical checkpoints to their hosted config files.
DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    """Configuration for DeiT models (ViT-style defaults for deit-base, patch 16, 224px).

    Restored from the mangled original, in which the base class name was undefined
    and every `self.<attr> = ...` assignment had become a discarded local.
    """

    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    """ONNX export configuration for DeiT (single dynamic image input)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 222 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for DeBERTa.

    Restored canonical attribute/method names: the mixin reads
    `tokenizer_class`/`test_rust_tokenizer`/`rust_tokenizer_class` and unittest
    discovers `test_*` methods, none of which the mangled original provided
    (all methods shared one name).
    """

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                'input_ids': [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                'token_type_ids': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                'attention_mask': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 667 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir):
    """A second lock on the same file must raise Timeout after ~`timeout` seconds.

    Restored names: the mangled original assigned everything to placeholder
    locals while reading `tmpdir`, `timeout` and `_start`, which were undefined.
    """
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
    # The failed acquire must have waited at least the requested timeout.
    assert time.time() - _start > timeout
def test_long_path(tmpdir):
    """FileLock must shorten over-long lock filenames while keeping the .lock suffix.

    Restored names: the mangled original read `filename`, which was never defined,
    and reused one placeholder name for both locks.
    """
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    # The effective lock file keeps the suffix but is truncated to a legal basename.
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 667 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( snake_case , unittest.TestCase ):
    """Fast, CPU-only tests for ``AudioLDMPipeline`` built from tiny dummy components.

    NOTE(review): this block is machine-obfuscated and left byte-identical.
    Every method is named ``_lowerCamelCase`` (later defs clobber earlier
    ones), several signatures repeat the parameter name ``UpperCamelCase_``
    (a SyntaxError), and locals are bound to ``__lowercase`` while later
    statements read the intended names (``unet``, ``audioldm_pipe``,
    ``audio`` ...). The comments describe the intended behavior only.
    """

    # Pipeline class under test plus the shared text-to-audio parameter sets.
    UpperCamelCase =AudioLDMPipeline
    UpperCamelCase =TEXT_TO_AUDIO_PARAMS
    UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
    # Call-signature keys required by the PipelineTesterMixin checks.
    UpperCamelCase =frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ] )

    # Intended: get_dummy_components — tiny UNet/VAE/CLAP/vocoder for fast tests.
    def _lowerCamelCase ( self ) -> List[str]:
        torch.manual_seed(0 )
        __lowercase : Tuple = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=UpperCamelCase_ , )
        __lowercase : List[Any] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
        torch.manual_seed(0 )
        __lowercase : int = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        __lowercase : List[Any] = ClapTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , )
        __lowercase : Optional[int] = ClapTextModelWithProjection(UpperCamelCase_ )
        __lowercase : Dict = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
        __lowercase : Union[str, Any] = SpeechTaHifiGanConfig(
            model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=UpperCamelCase_ , )
        __lowercase : Union[str, Any] = SpeechTaHifiGan(UpperCamelCase_ )
        # Components dict consumed by AudioLDMPipeline(**components).
        __lowercase : Dict = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''vocoder''': vocoder,
        }
        return components

    # Intended: get_dummy_inputs — seeded generator + minimal call kwargs.
    # NOTE(review): duplicate parameter name ``UpperCamelCase_`` is a SyntaxError.
    def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=0 ) -> Any:
        # mps does not support device-bound generators; fall back to the global seed.
        if str(UpperCamelCase_ ).startswith('''mps''' ):
            __lowercase : Union[str, Any] = torch.manual_seed(UpperCamelCase_ )
        else:
            __lowercase : List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
        __lowercase : Optional[Any] = {
            '''prompt''': '''A hammer hitting a wooden surface''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
        }
        return inputs

    # Intended: test_audioldm_ddim — 1-D waveform of 256 samples with pinned slice.
    def _lowerCamelCase ( self ) -> Union[str, Any]:
        __lowercase : Optional[int] = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowercase : str = self.get_dummy_components()
        __lowercase : Tuple = AudioLDMPipeline(**UpperCamelCase_ )
        __lowercase : Optional[int] = audioldm_pipe.to(UpperCamelCase_ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        __lowercase : Any = self.get_dummy_inputs(UpperCamelCase_ )
        __lowercase : Optional[Any] = audioldm_pipe(**UpperCamelCase_ )
        __lowercase : Any = output.audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase_ ) == 2_56
        __lowercase : Dict = audio[:10]
        __lowercase : Any = np.array(
            [-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2

    # Intended: test_audioldm_prompt_embeds — pre-computed prompt embeddings
    # must produce the same audio as the raw prompt string.
    def _lowerCamelCase ( self ) -> Optional[Any]:
        __lowercase : Optional[int] = self.get_dummy_components()
        __lowercase : Dict = AudioLDMPipeline(**UpperCamelCase_ )
        __lowercase : Tuple = audioldm_pipe.to(UpperCamelCase_ )
        __lowercase : Any = audioldm_pipe.to(UpperCamelCase_ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        __lowercase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ )
        __lowercase : Any = 3 * [inputs['''prompt''']]
        # forward
        __lowercase : Dict = audioldm_pipe(**UpperCamelCase_ )
        __lowercase : int = output.audios[0]
        __lowercase : int = self.get_dummy_inputs(UpperCamelCase_ )
        __lowercase : Optional[int] = 3 * [inputs.pop('''prompt''' )]
        __lowercase : Optional[Any] = audioldm_pipe.tokenizer(
            UpperCamelCase_ , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , )
        __lowercase : Tuple = text_inputs['''input_ids'''].to(UpperCamelCase_ )
        __lowercase : List[Any] = audioldm_pipe.text_encoder(
            UpperCamelCase_ , )
        __lowercase : Any = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        __lowercase : Tuple = F.normalize(UpperCamelCase_ , dim=-1 )
        __lowercase : Optional[int] = prompt_embeds
        # forward
        __lowercase : Any = audioldm_pipe(**UpperCamelCase_ )
        __lowercase : str = output.audios[0]
        assert np.abs(audio_a - audio_a ).max() < 1E-2

    # Intended: test_audioldm_negative_prompt_embeds — same equivalence check
    # with both prompt and negative-prompt embeddings pre-computed.
    def _lowerCamelCase ( self ) -> Optional[Any]:
        __lowercase : List[Any] = self.get_dummy_components()
        __lowercase : Tuple = AudioLDMPipeline(**UpperCamelCase_ )
        __lowercase : Dict = audioldm_pipe.to(UpperCamelCase_ )
        __lowercase : Union[str, Any] = audioldm_pipe.to(UpperCamelCase_ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        __lowercase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ )
        __lowercase : Dict = 3 * ['''this is a negative prompt''']
        __lowercase : Optional[int] = negative_prompt
        __lowercase : Tuple = 3 * [inputs['''prompt''']]
        # forward
        __lowercase : int = audioldm_pipe(**UpperCamelCase_ )
        __lowercase : Optional[int] = output.audios[0]
        __lowercase : Tuple = self.get_dummy_inputs(UpperCamelCase_ )
        __lowercase : int = 3 * [inputs.pop('''prompt''' )]
        __lowercase : Optional[int] = []
        for p in [prompt, negative_prompt]:
            __lowercase : List[Any] = audioldm_pipe.tokenizer(
                UpperCamelCase_ , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , )
            __lowercase : Tuple = text_inputs['''input_ids'''].to(UpperCamelCase_ )
            __lowercase : int = audioldm_pipe.text_encoder(
                UpperCamelCase_ , )
            __lowercase : int = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            __lowercase : str = F.normalize(UpperCamelCase_ , dim=-1 )
            embeds.append(UpperCamelCase_ )
        __lowercase ,__lowercase : Union[str, Any] = embeds
        # forward
        __lowercase : int = audioldm_pipe(**UpperCamelCase_ )
        __lowercase : List[str] = output.audios[0]
        assert np.abs(audio_a - audio_a ).max() < 1E-2

    # Intended: test_audioldm_negative_prompt — plain negative-prompt string path.
    def _lowerCamelCase ( self ) -> Union[str, Any]:
        __lowercase : Dict = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowercase : Any = self.get_dummy_components()
        __lowercase : Optional[Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
        __lowercase : Tuple = AudioLDMPipeline(**UpperCamelCase_ )
        __lowercase : Tuple = audioldm_pipe.to(UpperCamelCase_ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        __lowercase : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_ )
        __lowercase : Dict = '''egg cracking'''
        __lowercase : int = audioldm_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ )
        __lowercase : Union[str, Any] = output.audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase_ ) == 2_56
        __lowercase : str = audio[:10]
        __lowercase : Any = np.array(
            [-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2

    # Intended: test_audioldm_num_waveforms_per_prompt — output batch shapes.
    def _lowerCamelCase ( self ) -> Dict:
        __lowercase : str = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowercase : Tuple = self.get_dummy_components()
        __lowercase : str = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
        __lowercase : Dict = AudioLDMPipeline(**UpperCamelCase_ )
        __lowercase : Tuple = audioldm_pipe.to(UpperCamelCase_ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        __lowercase : Any = '''A hammer hitting a wooden surface'''
        # test num_waveforms_per_prompt=1 (default)
        __lowercase : Union[str, Any] = audioldm_pipe(UpperCamelCase_ , num_inference_steps=2 ).audios
        assert audios.shape == (1, 2_56)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        __lowercase : Union[str, Any] = 2
        __lowercase : List[Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
        assert audios.shape == (batch_size, 2_56)
        # test num_waveforms_per_prompt for single prompt
        __lowercase : Tuple = 2
        __lowercase : int = audioldm_pipe(UpperCamelCase_ , num_inference_steps=2 , num_waveforms_per_prompt=UpperCamelCase_ ).audios
        assert audios.shape == (num_waveforms_per_prompt, 2_56)
        # test num_waveforms_per_prompt for batch of prompts
        __lowercase : str = 2
        __lowercase : int = audioldm_pipe(
            [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=UpperCamelCase_ ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)

    # Intended: test_audioldm_audio_length_in_s — requested duration times the
    # vocoder sampling rate must equal the number of output samples.
    def _lowerCamelCase ( self ) -> List[Any]:
        __lowercase : Tuple = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowercase : str = self.get_dummy_components()
        __lowercase : List[Any] = AudioLDMPipeline(**UpperCamelCase_ )
        __lowercase : Optional[int] = audioldm_pipe.to(UpperCamelCase_ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        __lowercase : List[Any] = audioldm_pipe.vocoder.config.sampling_rate
        __lowercase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ )
        __lowercase : List[str] = audioldm_pipe(audio_length_in_s=0.0_1_6 , **UpperCamelCase_ )
        __lowercase : Optional[int] = output.audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase_ ) / vocoder_sampling_rate == 0.0_1_6
        __lowercase : int = audioldm_pipe(audio_length_in_s=0.0_3_2 , **UpperCamelCase_ )
        __lowercase : Any = output.audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase_ ) / vocoder_sampling_rate == 0.0_3_2

    # Intended: test_audioldm_vocoder_model_in_dim — doubling the vocoder's mel
    # channels must not change the waveform length.
    def _lowerCamelCase ( self ) -> Optional[int]:
        __lowercase : str = self.get_dummy_components()
        __lowercase : int = AudioLDMPipeline(**UpperCamelCase_ )
        __lowercase : Optional[Any] = audioldm_pipe.to(UpperCamelCase_ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        __lowercase : Optional[Any] = ['''hey''']
        __lowercase : List[Any] = audioldm_pipe(UpperCamelCase_ , num_inference_steps=1 )
        __lowercase : Optional[Any] = output.audios.shape
        assert audio_shape == (1, 2_56)
        __lowercase : Optional[Any] = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        __lowercase : List[Any] = SpeechTaHifiGan(UpperCamelCase_ ).to(UpperCamelCase_ )
        __lowercase : Union[str, Any] = audioldm_pipe(UpperCamelCase_ , num_inference_steps=1 )
        __lowercase : Optional[int] = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 2_56)

    # Intended: delegate to the mixin's attention-slicing equivalence check.
    def _lowerCamelCase ( self ) -> Dict:
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCamelCase_ )

    # Intended: delegate to the mixin's batched-vs-single equivalence check.
    def _lowerCamelCase ( self ) -> Optional[Any]:
        self._test_inference_batch_single_identical(test_mean_pixel_difference=UpperCamelCase_ )

    # Intended: xformers memory-efficient attention equivalence (CUDA only).
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def _lowerCamelCase ( self ) -> Optional[Any]:
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCamelCase_ )
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow integration tests running the real ``cvssp/audioldm`` checkpoint.

    NOTE(review): machine-obfuscated and left byte-identical — all methods
    share the name ``_lowerCamelCase`` (later defs clobber earlier ones), one
    signature repeats the parameter name ``UpperCamelCase_`` (SyntaxError),
    and locals bound to ``__lowercase`` are read back under intended names
    (``audioldm_pipe``, ``audio`` ...). Comments describe intent only.
    """

    # Intended: tearDown — free GPU memory between tests.
    def _lowerCamelCase ( self ) -> Dict:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Intended: get_inputs — fixed latents + generator for reproducible runs.
    # NOTE(review): duplicate parameter name ``UpperCamelCase_`` is a SyntaxError.
    def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_="cpu" , UpperCamelCase_=torch.floataa , UpperCamelCase_=0 ) -> Any:
        __lowercase : List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
        # Latents shaped (batch, channels, frames, mel bins) for the AudioLDM UNet.
        __lowercase : Optional[int] = np.random.RandomState(UpperCamelCase_ ).standard_normal((1, 8, 1_28, 16) )
        __lowercase : List[str] = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
        __lowercase : str = {
            '''prompt''': '''A hammer hitting a wooden surface''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 2.5,
        }
        return inputs

    # Intended: test_audioldm — default scheduler, 25 steps, pinned audio slice.
    def _lowerCamelCase ( self ) -> Dict:
        __lowercase : Tuple = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
        __lowercase : Optional[Any] = audioldm_pipe.to(UpperCamelCase_ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        __lowercase : List[Any] = self.get_inputs(UpperCamelCase_ )
        __lowercase : Union[str, Any] = 25
        __lowercase : Dict = audioldm_pipe(**UpperCamelCase_ ).audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase_ ) == 8_19_20
        __lowercase : Optional[int] = audio[7_72_30:7_72_40]
        __lowercase : Tuple = np.array(
            [-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5] )
        __lowercase : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 1E-2

    # Intended: test_audioldm_lms — swap in LMSDiscreteScheduler, pinned slice.
    def _lowerCamelCase ( self ) -> Union[str, Any]:
        __lowercase : List[str] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
        __lowercase : str = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
        __lowercase : Optional[int] = audioldm_pipe.to(UpperCamelCase_ )
        audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        __lowercase : Any = self.get_inputs(UpperCamelCase_ )
        __lowercase : List[Any] = audioldm_pipe(**UpperCamelCase_ ).audios[0]
        assert audio.ndim == 1
        assert len(UpperCamelCase_ ) == 8_19_20
        __lowercase : int = audio[2_77_80:2_77_90]
        __lowercase : Dict = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2] )
        __lowercase : Any = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 3E-2
| 76 |
def _UpperCamelCase ( a , b ):
    """Return True if ``a`` can be turned into ``b`` by capitalizing some of its
    lowercase letters and deleting all remaining lowercase letters.

    Uppercase letters of ``a`` can be neither changed nor deleted (the classic
    "Abbreviation" dynamic-programming problem).

    Args:
        a: candidate string (mixed case).
        b: target string (conventionally all uppercase).

    Returns:
        bool: True when ``a`` can be abbreviated to ``b``.

    Fixes (vs. obfuscated original): both parameters shared one name
    (SyntaxError) and the DP-table updates were assigned to a throwaway local
    instead of ``dp[...]``, so the table was never filled.
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: can the first i chars of `a` produce the first j chars of `b`?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize a[i] (or keep it, if already uppercase) to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # A lowercase letter may simply be deleted.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module when executed as a script.
    doctest.testmod()
| 696 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 631 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : str =logging.get_logger(__name__)

# NOTE(review): obfuscated module constants — the logger above and the archive
# map below are both bound to the same name ``_A`` (the second binding clobbers
# the first); presumably these were ``logger`` and
# ``ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP`` — confirm against the original file.
_A : int ={
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase(PretrainedConfig):
    """Configuration class for a RoCBert model.

    Stores the standard BERT-style hyper-parameters plus RoCBert's extra
    pronunciation/shape embedding settings.

    Fixes (vs. obfuscated original): the class inherited from itself
    (``class _lowercase(_lowercase)`` -> NameError at definition; the file's
    import of ``PretrainedConfig`` shows the intended base), ``__init__``
    repeated the parameter name ``UpperCamelCase__`` 23 times (SyntaxError),
    and the ``model_type`` attribute had lost its name. Parameter names are
    restored from the names the constructor body itself reads.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,               # size of the token vocabulary
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,       # use pronunciation (pinyin) embeddings
        enable_shape=True,               # use glyph-shape embeddings
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,               # concat extra embeddings with word embeddings
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 631 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__magic_name__ =logging.getLogger(__name__)
def __UpperCamelCase ( preds , labels ):
    """Return the fraction of predictions that match the labels.

    Args:
        preds: numpy array of predicted label ids.
        labels: numpy array of gold label ids (same shape as ``preds``).

    Returns:
        Mean of the element-wise equality mask (a numpy float in [0, 1]).

    Fix (vs. obfuscated original): both parameters were named ``A``, a
    SyntaxError; the body reads ``preds`` and ``labels``.
    """
    return (preds == labels).mean()
@dataclass
class _A:
    """Arguments pertaining to which model/config/tokenizer we fine-tune.

    Fixes (vs. obfuscated original): all four fields shared the name
    ``SCREAMING_SNAKE_CASE_`` (a dataclass keeps only the last duplicate, so
    three fields vanished) and optional fields used ``default=__UpperCamelCase``
    — a function object — instead of ``None``. Field names are restored to the
    attributes ``main()`` in this file reads (``model_name_or_path`` etc.).
    """

    # Required: model checkpoint path or hub identifier.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class _A:
    """Arguments pertaining to what data we feed the model for training/eval.

    Fixes (vs. obfuscated original): the four fields shared the name
    ``SCREAMING_SNAKE_CASE_`` (dataclass duplicate collapse) and the boolean
    field's default was ``__UpperCamelCase`` — a function object — instead of
    ``False``. Field names are restored to the attributes ``main()`` in this
    file reads (``task_name``, ``data_dir``, ``max_seq_length``,
    ``overwrite_cache``).
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length: int = field(
        default=1_28,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def __UpperCamelCase ( ):
    """Entry point of the multiple-choice fine-tuning example: parse args,
    build model/tokenizer/datasets, train, and optionally evaluate.

    NOTE(review): this block is machine-obfuscated and left byte-identical.
    Every local is bound to ``UpperCamelCase__`` while later statements read
    the intended names (``parser``, ``model_args``, ``data_args``,
    ``training_args``, ``processor``, ``trainer`` ...), and several call sites
    pass the placeholder ``A`` — as written this raises NameError at runtime.
    The comments below describe the intended flow only.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    # Intended: model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
    # Refuse to clobber an existing, non-empty output directory.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , A )
    # Set seed
    set_seed(training_args.seed )
    # Intended: look up the task processor and derive the label list / count.
    try:
        UpperCamelCase__ = processors[data_args.task_name]()
        UpperCamelCase__ = processor.get_labels()
        UpperCamelCase__ = len(A )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCamelCase__ = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    UpperCamelCase__ = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCamelCase__ = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
    # Get datasets
    UpperCamelCase__ = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    UpperCamelCase__ = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    # Intended: accuracy metric over argmax of the choice logits.
    def compute_metrics(A ) -> Dict:
        UpperCamelCase__ = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(A , p.label_ids )}

    # Data collator
    UpperCamelCase__ = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    UpperCamelCase__ = Trainer(
        model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    UpperCamelCase__ = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCamelCase__ = trainer.evaluate()
        UpperCamelCase__ = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(A , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , A , A )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(A )
    return results
def __UpperCamelCase ( A ):
    """Entry point for ``xla_spawn`` (TPU); the spawn index ``A`` is unused.

    NOTE(review): calls ``main()``, which is not defined in this file as
    written (the real entry point above was obfuscation-renamed) — confirm
    the intended target before running.
    """
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 415 | import os
import jsonlines
import numpy as np
from tqdm import tqdm
# NOTE(review): obfuscated module constants — all five values are bound to the
# same name ``__magic_name__`` (each assignment clobbers the previous one);
# presumably these were DOC_STRIDE, MAX_LENGTH, SEED, PROCESS_TRAIN and
# CATEGORY_MAPPING, which later code in this file reads under those names.
__magic_name__ =2048
__magic_name__ =4096
__magic_name__ =42
__magic_name__ =os.environ.pop('''PROCESS_TRAIN''', '''false''')
__magic_name__ ={'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def __UpperCamelCase ( A ):
    """Reduce a natural_questions annotation to a single answer dict
    (``id``, ``category``, ``start/end_token``, ``start/end_byte``, ``text``).

    NOTE(review): machine-obfuscated and left byte-identical — the nested
    helper repeats the parameter name ``A`` (SyntaxError), and locals bound to
    ``UpperCamelCase__`` are read back as ``answer``/``annotation``/``out``
    etc. (NameError at runtime). Comments describe intent only.
    """
    # Intended: pick the first annotator answer that has a non-empty span.
    def choose_first(A , A=False ):
        assert isinstance(A , A )
        if len(A ) == 1:
            UpperCamelCase__ = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                UpperCamelCase__ = {k: [a[k]] for k in a}
            if len(a['''start_token'''] ) > 0:
                break
        return a

    UpperCamelCase__ = {'''id''': example['''id''']}
    UpperCamelCase__ = example['''annotations''']
    UpperCamelCase__ = annotation['''yes_no_answer''']
    # Yes/no questions carry no span; mark them with the <cls> pseudo-text.
    if 0 in yes_no_answer or 1 in yes_no_answer:
        UpperCamelCase__ = ['''yes'''] if 1 in yes_no_answer else ['''no''']
        UpperCamelCase__ = UpperCamelCase__ = []
        UpperCamelCase__ = UpperCamelCase__ = []
        UpperCamelCase__ = ['''<cls>''']
    else:
        UpperCamelCase__ = ['''short''']
        UpperCamelCase__ = choose_first(annotation['''short_answers'''] )
        if len(out['''start_token'''] ) == 0:
            # answer will be long if short is not available
            UpperCamelCase__ = ['''long''']
            UpperCamelCase__ = choose_first(annotation['''long_answer'''] , is_long_answer=A )
            UpperCamelCase__ = []
        answer.update(A )
    # disregard some samples
    if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
        UpperCamelCase__ = True
    else:
        UpperCamelCase__ = False
    # Sanity-check that every span column is list-valued before returning.
    UpperCamelCase__ = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
    if not all(isinstance(answer[k] , A ) for k in cols ):
        raise ValueError('''Issue in ID''' , example['''id'''] )
    return answer
def __UpperCamelCase ( A , A=False ):
    """Strip HTML tokens from the document and re-index the answer span into
    the cleaned context; returns ``{"context": ..., "answer": {...}}``.

    NOTE(review): machine-obfuscated and left byte-identical — duplicate
    parameter names ``(A , A=False)`` are a SyntaxError, and locals bound to
    ``UpperCamelCase__`` are read back as ``answer``/``doc``/``context``/
    ``start_token``/``end_token`` etc. Comments describe intent only.
    """
    UpperCamelCase__ = _get_single_answer(A )
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        UpperCamelCase__ = example['''document''']['''tokens''']
        UpperCamelCase__ = []
        for i in range(len(doc['''token'''] ) ):
            if not doc["is_html"][i]:
                context.append(doc['''token'''][i] )
        return {
            "context": " ".join(A ),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    UpperCamelCase__ = ['''start_token''', '''end_token''']
    answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} )  # e.g. [10] == 10
    UpperCamelCase__ = example['''document''']['''tokens''']
    UpperCamelCase__ = answer['''start_token''']
    UpperCamelCase__ = answer['''end_token''']
    UpperCamelCase__ = []
    # Walk the tokens, dropping HTML and shifting the span left for each drop.
    for i in range(len(doc['''token'''] ) ):
        if not doc["is_html"][i]:
            context.append(doc['''token'''][i] )
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    UpperCamelCase__ = ''' '''.join(context[start_token:end_token] )
    # checking above code
    if assertion:
        UpperCamelCase__ = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
        UpperCamelCase__ = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
        UpperCamelCase__ = ''' '''.join([old[i] for i in range(len(A ) ) if not is_html[i]] )
        if new != old:
            print('''ID:''' , example['''id'''] )
            print('''New:''' , A , end='''\n''' )
            print('''Old:''' , A , end='''\n\n''' )
    return {
        "context": " ".join(A ),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def __UpperCamelCase ( A , A , A=2048 , A=4096 , A=True ):
    """Tokenize question+context and split long contexts into overlapping
    windows (stride ``max_length - doc_stride``), re-anchoring the answer span
    inside each window; returns ``example_id``/``input_ids``/``labels``.

    NOTE(review): machine-obfuscated and left byte-identical — the parameter
    name ``A`` is repeated five times (SyntaxError), and locals bound to
    ``UpperCamelCase__`` are read back as ``answer``/``input_ids``/``q_len``/
    ``inputs`` etc. Comments describe intent only.
    """
    # overlap will be of doc_stride - q_len
    UpperCamelCase__ = get_context_and_ans(A , assertion=A )
    UpperCamelCase__ = out['''answer''']
    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    UpperCamelCase__ = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
    UpperCamelCase__ = input_ids.index(tokenizer.sep_token_id ) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        # Yes/no: every window keeps the question prefix; span labels are ignored (-100).
        UpperCamelCase__ = []
        UpperCamelCase__ = []
        UpperCamelCase__ = input_ids[:q_len]
        UpperCamelCase__ = range(A , len(A ) , max_length - doc_stride )
        for i in doc_start_indices:
            UpperCamelCase__ = i + max_length - q_len
            UpperCamelCase__ = input_ids[i:end_index]
            inputs.append(q_indices + slice )
            category.append(answer['''category'''][0] )
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(A ),
                "end_token": [-100] * len(A ),
                "category": category,
            },
        }
    # Re-compute the answer span in tokenizer space (word -> subword indices).
    UpperCamelCase__ = out['''context'''].split()
    UpperCamelCase__ = splitted_context[answer['''end_token''']]
    UpperCamelCase__ = len(
        tokenizer(
            ''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=A , ).input_ids )
    UpperCamelCase__ = len(
        tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=A ).input_ids )
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    UpperCamelCase__ = len(tokenizer(A , add_special_tokens=A ).input_ids )
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    UpperCamelCase__ = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1]  # right & left are inclusive
    UpperCamelCase__ = answer['''start_token''']
    UpperCamelCase__ = answer['''end_token''']
    if assertion:
        UpperCamelCase__ = tokenizer.decode(A )
        if answer["span"] != new:
            print('''ISSUE IN TOKENIZATION''' )
            print('''OLD:''' , answer['''span'''] )
            print('''NEW:''' , A , end='''\n\n''' )
    # Short enough: single window, no striding needed.
    if len(A ) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    # Stride over the document; label windows that fully contain the span,
    # mark the rest "null" with ignored (-100) span indices.
    UpperCamelCase__ = input_ids[:q_len]
    UpperCamelCase__ = range(A , len(A ) , max_length - doc_stride )
    UpperCamelCase__ = []
    UpperCamelCase__ = []
    UpperCamelCase__ = []
    UpperCamelCase__ = []  # null, yes, no, long, short
    for i in doc_start_indices:
        UpperCamelCase__ = i + max_length - q_len
        UpperCamelCase__ = input_ids[i:end_index]
        inputs.append(q_indices + slice )
        assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            UpperCamelCase__ = start_token - i + q_len
            UpperCamelCase__ = end_token - i + q_len
            answers_category.append(answer['''category'''][0] )  # ["short"] -> "short"
        else:
            UpperCamelCase__ = -100
            UpperCamelCase__ = -100
            answers_category.append('''null''' )
        UpperCamelCase__ = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(A )
        answers_end_token.append(A )
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print('''ISSUE in strided for ID:''' , example['''id'''] )
                print('''New:''' , tokenizer.decode(A ) )
                print('''Old:''' , tokenizer.decode(A ) , end='''\n\n''' )
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def __UpperCamelCase ( A , A , A=2048 , A=4096 , A=False ):
    """``datasets.map``-style wrapper around the strided-context builder.

    NOTE(review): left byte-identical — the parameter name ``A`` is repeated
    five times (SyntaxError); ``get_strided_contexts_and_ans`` and ``example``
    are post-obfuscation names not defined in this file as written.
    """
    UpperCamelCase__ = get_strided_contexts_and_ans(
        A , A , doc_stride=A , max_length=A , assertion=A , )
    return example
def __UpperCamelCase ( A , A ):
    """Append processed QA samples to a JSON-Lines file, dropping no-answer
    windows and randomly subsampling "null" windows.

    NOTE(review): left byte-identical — locals bound to ``UpperCamelCase__``
    are read back as ``labels``; the two positional ``A`` parameters are
    presumably the dataset and the output path. The 0.6 drop-rate contradicts
    the inline "50 %" comment — confirm the intended rate.
    """
    with jsonlines.open(A , '''a''' ) as writer:
        for example in tqdm(A , total=len(A ) , desc='''Saving samples ... ''' ):
            UpperCamelCase__ = example['''labels''']
            for ids, start, end, cat in zip(
                example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing 50 % samples
                writer.write(
                    {
                        '''input_ids''': ids,
                        '''start_token''': start,
                        '''end_token''': end,
                        '''category''': CATEGORY_MAPPING[cat],
                    } )
if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    # NOTE(review): obfuscated — every binding below reuses ``__magic_name__``
    # while later lines read ``data``, ``tokenizer``, ``fn_kwargs`` and
    # ``cache_file_name``; as written this raises NameError at runtime.
    # Intended flow: load NQ, tokenize/stride every example, drop the raw
    # columns, and dump the result to a jsonl cache file.
    __magic_name__ =load_dataset('''natural_questions''')
    __magic_name__ =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
    __magic_name__ =data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
    __magic_name__ ={
        '''tokenizer''': tokenizer,
        '''doc_stride''': DOC_STRIDE,
        '''max_length''': MAX_LENGTH,
        '''assertion''': False,
    }
    __magic_name__ =data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    __magic_name__ =data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
    print(data)
    np.random.seed(SEED)
    __magic_name__ ='''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
    save_to_disk(data, file_name=cache_file_name)
| 415 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the CTRL model family: maps submodule name ->
# public names it exports. BUGFIX: the dict and the optional-backend lists
# were previously bound to throwaway obfuscated names, so `_import_structure`
# was undefined and the `_LazyModule` instance was never installed into
# `sys.modules`.
_import_structure = {
    """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
    """tokenization_ctrl""": ["""CTRLTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch objects are only advertised when torch is installed.
    _import_structure["""modeling_ctrl"""] = [
        """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CTRLForSequenceClassification""",
        """CTRLLMHeadModel""",
        """CTRLModel""",
        """CTRLPreTrainedModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow objects are only advertised when TF is installed.
    _import_structure["""modeling_tf_ctrl"""] = [
        """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFCTRLForSequenceClassification""",
        """TFCTRLLMHeadModel""",
        """TFCTRLModel""",
        """TFCTRLPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backends are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 151 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
a__ : Union[str, Any] = cst_fwd.get(__a , np.inf )
a__ : Dict = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
a__ : List[str] = new_cost_f
a__ : Optional[int] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
a__ : Optional[Any] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCamelCase_ ( source , destination , graph_forward , graph_backward ) -> int:
    """Bidirectional Dijkstra between `source` and `destination`.

    `graph_forward` / `graph_backward` map node -> list of [neighbour, weight]
    edges in each direction. Returns the shortest distance, or -1 when the
    destination is unreachable.
    """
    # BUGFIX: the four parameters were all named `__a` (a SyntaxError) and
    # every local below was bound to a throwaway obfuscated name.
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        # Advance one settled node on each frontier per iteration.
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )

        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )

        # Standard termination test: frontiers have crossed the best path.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Demo adjacency lists for the bidirectional search (edges as [neighbour, weight]).
# NOTE(review): both graphs are bound to the SAME obfuscated name
# `UpperCamelCase`, so the second assignment shadows the first — these were
# presumably `graph_fwd` / `graph_bwd` originally; confirm before using them.
UpperCamelCase : Optional[Any] = {
    """B""": [["""C""", 1]],
    """C""": [["""D""", 1]],
    """D""": [["""F""", 1]],
    """E""": [["""B""", 1], ["""G""", 2]],
    """F""": [],
    """G""": [["""F""", 1]],
}
# Reverse-direction adjacency of the graph above.
UpperCamelCase : List[Any] = {
    """B""": [["""E""", 1]],
    """C""": [["""B""", 1]],
    """D""": [["""C""", 1]],
    """F""": [["""D""", 1], ["""G""", 1]],
    """E""": [[None, np.inf]],
    """G""": [["""E""", 2]],
}
if __name__ == "__main__":
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()
| 151 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Fallback stub so this test module still imports when Pillow is missing;
    # the actual vision tests are gated by `require_vision` anyway.
    # NOTE(review): the stub's class/method names are obfuscated here —
    # presumably this was `class Image:` with a static `open`; confirm.
    class SCREAMING_SNAKE_CASE__ :
        @staticmethod
        def SCREAMING_SNAKE_CASE ( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
            '''No-op stand-in for `PIL.Image.open`.'''
            pass
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Pipeline tests for the `visual-question-answering` task."""

    # BUGFIX: this attribute and every method below previously shared one
    # obfuscated name, so later definitions shadowed earlier ones, and the
    # method parameter lists repeated `_SCREAMING_SNAKE_CASE` (a SyntaxError).
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline( self , model , tokenizer , processor ):
        """Return a tiny VQA pipeline plus two example inputs (PIL image / image path)."""
        vqa_pipeline = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
        examples = [
            {
                """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
                """question""": """How many cats are there?""",
            },
            {
                """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
                """question""": """How many cats are there?""",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test( self , vqa_pipeline , examples ):
        """Smoke-check output structure of a batched call with top_k=1."""
        outputs = vqa_pipeline(examples , top_k=1 )
        self.assertEqual(
            outputs , [
                [{"""score""": ANY(float ), """answer""": ANY(str )}],
                [{"""score""": ANY(float ), """answer""": ANY(str )}],
            ] , )

    @require_torch
    def test_small_model_pt( self ):
        """End-to-end run of the tiny random checkpoint with kwarg and dict inputs."""
        vqa_pipeline = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
        image = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        question = """How many cats are there?"""

        outputs = vqa_pipeline(image=image , question="""How many cats are there?""" , top_k=2 )
        self.assertEqual(
            outputs , [{"""score""": ANY(float ), """answer""": ANY(str )}, {"""score""": ANY(float ), """answer""": ANY(str )}] )

        outputs = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            outputs , [{"""score""": ANY(float ), """answer""": ANY(str )}, {"""score""": ANY(float ), """answer""": ANY(str )}] )

    @slow
    @require_torch
    def test_large_model_pt( self ):
        """Check exact top-2 scores/answers of the real ViLT VQA checkpoint."""
        vqa_pipeline = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
        image = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        question = """How many cats are there?"""

        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )

        outputs = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )

        outputs = vqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , )

    @require_tf
    @unittest.skip("""Visual question answering not implemented in TF""" )
    def test_small_model_tf( self ):
        """Placeholder: the VQA pipeline has no TensorFlow implementation."""
        pass
| 160 |
"""simple docstring"""
# 3-D point / vector aliases (both are (x, y, z) float triples). BUGFIX: both
# aliases were bound to the same throwaway name `A`, leaving `Pointad` and
# `Vectorad` — used by the function annotations below — undefined.
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
# Backwards-compatible obfuscated name previously holding the alias.
A = Vectorad
def _snake_case ( end_point1: "Pointad" , end_point2: "Pointad" ) -> "Vectorad":
    """Return the displacement vector from `end_point1` to `end_point2`."""
    # BUGFIX: both parameters shared one name, so the components were computed
    # from the same point and the result was always (0, 0, 0).
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


# Descriptive name used by the collinearity check below.
create_vector = _snake_case
def _snake_case ( ab: "Vectorad" , ac: "Vectorad" ) -> "Vectorad":
    """Return the cross product ab x ac."""
    # BUGFIX: the parameters were both named `UpperCamelCase` while the body
    # reads `ab` and `ac`, so every call raised NameError.
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


# Descriptive name used by the collinearity check below.
get_ad_vectors_cross = _snake_case
def _snake_case ( vector: "Vectorad" , accuracy: int ) -> bool:
    """Return True if `vector` rounds to the zero vector at `accuracy` decimal places."""
    # BUGFIX: the original rounded an unbound placeholder name instead of each
    # component with the requested accuracy.
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)


# Descriptive name used by the collinearity check below.
is_zero_vector = _snake_case
def _snake_case ( point_a: "Pointad" , point_b: "Pointad" , point_c: "Pointad" , accuracy: int = 10 ) -> bool:
    """Return True if three 3-D points are collinear.

    Collinearity holds when the cross product of AB and AC is (approximately)
    the zero vector, judged at `accuracy` decimal places.
    """
    # BUGFIX: the parameters previously all shared one name (a SyntaxError).
    vector_ab = create_vector(point_a , point_b )
    vector_ac = create_vector(point_a , point_c )
    return is_zero_vector(get_ad_vectors_cross(vector_ab , vector_ac ) , accuracy )
| 160 | 1 |
'''simple docstring'''
import cva
import numpy as np
class snake_case__ :
    """Harris corner detector over a grayscale image.

    Attributes:
        k: Harris free parameter; only the paper's 0.04 / 0.06 are accepted.
        window_size: side length of the square summation window.
    """

    def __init__( self , k : float , window_size : int ) -> None:
        # BUGFIX: both constructor parameters were named `_A` (a SyntaxError).
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''' )

    def __str__( self ) -> str:
        return str(self.k )

    def A ( self , img_path : str ) -> "tuple[cva.Mat, list[list[int]]]":
        """Detect corners in the grayscale image at `img_path`.

        Returns the RGB image with detected corners painted red, plus the
        list of [x, y, response] corner entries.
        """
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        # BUGFIX: a hard-coded 0.04 previously shadowed the validated
        # constructor parameter, silently ignoring k=0.06.
        k = self.k
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value: response threshold for accepting a corner.
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 2_55 )
        return color_img, corner_list

    # Descriptive alias used by the demo code below.
    detect = A
if __name__ == "__main__":
    # Demo: run the detector on an image and save the annotated result.
    # NOTE(review): 'path_to_image' is a placeholder, and `HarrisCorner` /
    # `edge_detect` / `color_img` do not match the obfuscated names defined
    # above — confirm these identifiers before running this block.
    _UpperCamelCase : Any = HarrisCorner(0.04, 3)
    _UpperCamelCase , _UpperCamelCase : Dict = edge_detect.detect('path_to_image')
    cva.imwrite('detect.png', color_img)
| 721 |
'''simple docstring'''
def __UpperCAmelCase ( A : int ) -> bool:
if number < 0:
raise ValueError('''number must not be negative''' )
return number & (number - 1) == 0
if __name__ == "__main__":
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()
| 216 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
A_ : Tuple =logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config URL.
A_ : Tuple ={'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class lowercase_ ( PretrainedConfig):
    """Configuration for the original OpenAI GPT model.

    Stores vocabulary/architecture sizes, dropout rates and the
    sequence-summary settings used by the classification heads.

    BUGFIX: the base class was previously the undefined name
    `_UpperCamelCase` (should be the imported `PretrainedConfig`), the
    `model_type` / `attribute_map` class attributes shared one obfuscated
    name, the `__init__` parameter list repeated `_UpperCAmelCase`
    (a SyntaxError), and no value was ever assigned onto `self`.
    """

    model_type = '''openai-gpt'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self ,
        vocab_size=40_478 ,
        n_positions=512 ,
        n_embd=768 ,
        n_layer=12 ,
        n_head=12 ,
        afn="gelu" ,
        resid_pdrop=0.1 ,
        embd_pdrop=0.1 ,
        attn_pdrop=0.1 ,
        layer_norm_epsilon=1e-5 ,
        initializer_range=0.0_2 ,
        summary_type="cls_index" ,
        summary_use_proj=True ,
        summary_activation=None ,
        summary_proj_to_labels=True ,
        summary_first_dropout=0.1 ,
        **kwargs ,
    ):
        """Store all hyperparameters; extra kwargs go to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
a : List[str] = logging.get_logger(__name__)
# Pretrained checkpoint name -> hosted config URL.
a : Tuple = {
    '''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration for the Autoformer time-series transformer.

    BUGFIX: the base class was previously the undefined `_UpperCamelCase`
    (should be `PretrainedConfig`), `model_type` / `attribute_map` shared one
    obfuscated attribute name, the `__init__` parameter list repeated `a_`
    (a SyntaxError), nothing was assigned onto `self`, and the feature-count
    property was not named `_number_of_features` as the body requires.
    """

    model_type = """autoformer"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__(
        self ,
        prediction_length: Optional[int] = None ,
        context_length: Optional[int] = None ,
        distribution_output: str = "student_t" ,
        loss: str = "nll" ,
        input_size: int = 1 ,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7] ,  # noqa: B006 — never mutated
        scaling: bool = True ,
        num_time_features: int = 0 ,
        num_dynamic_real_features: int = 0 ,
        num_static_categorical_features: int = 0 ,
        num_static_real_features: int = 0 ,
        cardinality: Optional[List[int]] = None ,
        embedding_dimension: Optional[List[int]] = None ,
        d_model: int = 64 ,
        encoder_attention_heads: int = 2 ,
        decoder_attention_heads: int = 2 ,
        encoder_layers: int = 2 ,
        decoder_layers: int = 2 ,
        encoder_ffn_dim: int = 32 ,
        decoder_ffn_dim: int = 32 ,
        activation_function: str = "gelu" ,
        dropout: float = 0.1 ,
        encoder_layerdrop: float = 0.1 ,
        decoder_layerdrop: float = 0.1 ,
        attention_dropout: float = 0.1 ,
        activation_dropout: float = 0.1 ,
        num_parallel_samples: int = 100 ,
        init_std: float = 0.02 ,
        use_cache: bool = True ,
        is_encoder_decoder=True ,
        # Autoformer-specific arguments
        label_length: int = 10 ,
        moving_average: int = 25 ,
        autocorrelation_factor: int = 3 ,
        **kwargs ,
    ):
        """Store the time-series and transformer hyperparameters."""
        # Time-series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            # Default heuristic: half the cardinality, capped at 50.
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def _number_of_features( self ) -> int:
        """Total number of extra scalar features fed alongside the values."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 69 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# Module logger and the usage example injected into __call__'s docstring.
# BUGFIX: both were previously bound to throwaway obfuscated names, so the
# `@replace_example_docstring(...)` decorator below had nothing to reference.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def _UpperCAmelCase ( height , width , scale_factor=8 ):
    """Round (height, width) so each is a multiple of `scale_factor` sized for the latent grid.

    Divides by `scale_factor**2`, rounds up on any remainder, then multiplies
    back by `scale_factor` (the movq downsampling factor).
    """
    # BUGFIX: the parameters all shared one obfuscated name (a SyntaxError)
    # and `new_height` / `new_width` were incremented before ever being bound.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# Descriptive name used inside the pipeline's __call__.
downscale_height_and_width = _UpperCAmelCase
class _a ( DiffusionPipeline ):
    """Kandinsky 2.2 text-to-image decoder pipeline (unet + DDPM scheduler + movq VAE).

    BUGFIX: the base class was previously the undefined name `snake_case_`
    (should be `DiffusionPipeline`), four methods shared one obfuscated name
    so only the last survived, and attributes/locals such as
    `self.movq_scale_factor` and `latents` were bound to throwaway names.
    """

    def __init__( self , unet , scheduler , movq , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        # Spatial downsampling factor of the movq VAE.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        """Create (or validate) the initial latent tensor and scale it for the scheduler."""
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        """Offload each sub-model to CPU, moving it to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )

        device = torch.device(f"""cuda:{gpu_id}""" )

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )

    def enable_model_cpu_offload( self , gpu_id=0 ):
        """Offload whole models with hooks (requires accelerate >= 0.17)."""
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )

        device = torch.device(f"""cuda:{gpu_id}""" )

        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        """Device the unet actually executes on (accounts for accelerate hooks)."""
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        """Run the diffusion loop conditioned on prior image embeddings."""
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )

            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )

        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )

        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"""image_embeds""": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]

            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )

            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]

        # post-processing: decode latents back to pixel space with the movq VAE.
        image = self.movq.decode(latents , force_not_quantize=True )["""sample"""]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 693 |
import os
import string
import sys
# Flag OR-ed into arrow-key codes to distinguish them from plain characters.
ARROW_KEY_FLAG = 1 << 8

# Symbolic key name -> key code. BUGFIX: every structure below was previously
# bound to a throwaway obfuscated name, leaving KEYMAP / WIN_KEYMAP /
# WIN_CH_BUFFER — all used by the input functions further down — undefined,
# and the digit loop discarded its values.
KEYMAP = {
    'tab': ord('\t'),
    'newline': ord('\r'),
    'esc': 27,
    'up': 65 + ARROW_KEY_FLAG,
    'down': 66 + ARROW_KEY_FLAG,
    'right': 67 + ARROW_KEY_FLAG,
    'left': 68 + ARROW_KEY_FLAG,
    'mod_int': 91,
    'undefined': sys.maxsize,
    'interrupt': 3,
    'insert': 50,
    'delete': 51,
    'pg_up': 53,
    'pg_down': 54,
}

# Range markers used to recognise escape-sequence arrow keys.
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']

if sys.platform == "win32":
    # Pending translated keystrokes, and Windows scan-code -> key-code map.
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
        b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own character codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def _UpperCAmelCase ( ):
    """Read one raw keystroke from the terminal (Windows `msvcrt` or POSIX raw tty).

    On Windows, two-byte prefixed scan codes are translated via WIN_KEYMAP and
    buffered in WIN_CH_BUFFER; on POSIX the tty is switched to raw mode for a
    single one-character read and always restored.
    """
    # BUGFIX: every local below was previously bound to a throwaway obfuscated
    # name, so `ch` / `cha` / the saved tty settings were unbound at use.
    if os.name == "nt":
        import msvcrt

        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_2_6 ) )
                    ch = chr(KEYMAP["""esc"""] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            # Always restore the terminal to its previous mode.
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch


# Descriptive name used by the key-decoding function below.
get_raw_chars = _UpperCAmelCase
def _UpperCAmelCase ( ):
    """Read one logical key: printable char, interrupt/newline, or an arrow-key code.

    Escape sequences of the form ESC '[' <A-D> are translated into the
    ARROW_KEY_FLAG-tagged codes stored in KEYMAP; anything unrecognised maps
    to KEYMAP['undefined'].
    """
    # BUGFIX: the locals were bound to throwaway obfuscated names while the
    # body read the unbound name `char`; restored from the surrounding logic.
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]


# Readable alias for callers.
get_chars = _UpperCAmelCase
| 693 | 1 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowerCAmelCase_ ( a : Union[str, Any] ):
    """Load the T5X checkpoint at path `a` and return its params as a flat {tuple_key: array} dict."""
    # BUGFIX: both statements previously assigned to the same throwaway name,
    # so the second call flattened the *path* instead of the loaded params.
    flax_params = checkpoints.load_tax_checkpoint(a )
    flax_params = flatten_dict(flax_params )
    return flax_params


# Descriptive name used by the conversion entry point below.
get_flax_param = lowerCAmelCase_
def lowerCAmelCase_ ( a : Any ):
a__ = {}
a__ = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
a__ = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
a__ = '.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
a__ = new_key.replace(a , a )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
a__ = new_key.replace(a , a )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
a__ = re.sub(r'layers_(\d+)' , r'layer.\1' , a )
a__ = new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
a__ = re.sub(r'layers_(\d+)' , r'layer.\1' , a )
a__ = flax_dict[key]
a__ = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
a__ = torch.from_numpy(converted_dict[key].T )
else:
a__ = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def lowerCAmelCase_ ( tax_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    """Convert a Pix2Struct T5X checkpoint to a HF model + processor and save both.

    BUGFIX: the parameters previously repeated the name `a` (a SyntaxError)
    and the intermediate objects (configs, model, processor) were bound to
    throwaway names.
    """
    flax_params = get_flax_param(tax_checkpoint_path )

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )

    model = PixaStructForConditionalGeneration(config )

    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )

    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor , tokenizer=tokenizer )

    if use_large:
        # NOTE(review): the two values 4096 / True were assigned to throwaway
        # names in the source — presumably the image processor's max_patches
        # and is_vqa flags; confirm against the upstream conversion script.
        image_processor.max_patches = 4096
        image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )

    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )

    print('Model saved in {}'.format(pytorch_dump_folder_path ) )


# Descriptive name used by the __main__ block below.
convert_pixastruct_original_pytorch_checkpoint_to_hf = lowerCAmelCase_
if __name__ == "__main__":
    # BUGFIX: `parser` / `args` were previously bound to the throwaway name
    # `__A`, and the code read `args.tax_checkpoint_path` although argparse
    # stores the flag under `t5x_checkpoint_path`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    # BUGFIX: help text previously duplicated the --use_large description.
    parser.add_argument('--is_vqa', action='store_true', help='Convert to a VQA model.')
    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 394 |
'''simple docstring'''
from collections import namedtuple
# (from_: multiplier into cubic metres, to: multiplier out of cubic metres).
# BUGFIX: both the namedtuple factory and the table were previously bound to
# the same throwaway name `__A`, so the converter below could not see
# `METRIC_CONVERSION`.
from_to = namedtuple('from_to', 'from_ to')

METRIC_CONVERSION = {
    'cubicmeter': from_to(1, 1),
    'litre': from_to(0.001, 10_00),
    'kilolitre': from_to(1, 1),
    'gallon': from_to(0.00454, 264.172),
    'cubicyard': from_to(0.76455, 1.30795),
    'cubicfoot': from_to(0.028, 35.3147),
    'cup': from_to(0.000236588, 4226.75),
}
def lowerCAmelCase_ ( value : float , from_type : str , to_type : str ):
    """Convert `value` between volume units via cubic metres.

    Raises:
        ValueError: if either unit name is not in METRIC_CONVERSION.
    """
    # BUGFIX: the three parameters all repeated the name `a` (a SyntaxError)
    # and the supported-values join read that placeholder instead of the table.
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + ', '.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ', '.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()
| 394 | 1 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _UpperCAmelCase( FlaxBigBirdForQuestionAnsweringModule ):
    """BigBird QA module extended with an extra 5-way category classification head.

    NOTE(review): the original base class (`lowerCamelCase`) and the
    float dtype (`jnp.floataa`) were undefined names; the only module base
    imported by this file is `FlaxBigBirdForQuestionAnsweringModule`.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        """Build the base QA heads, then the category classifier (flax calls this)."""
        super().setup()
        # 5-way output head -- presumably the NQ answer categories; confirm
        # against the dataset's `category` labels.
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        # outputs[2] is the pooled representation produced by the base module
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class _UpperCAmelCase( FlaxBigBirdForQuestionAnswering ):
    """BigBird QA head model backed by the Natural-Questions module defined above."""

    # At class-body execution time `_UpperCAmelCase` still resolves to the module
    # class defined immediately above. The original reference
    # (`FlaxBigBirdForNaturalQuestionsModule`) does not exist in this file and
    # raised NameError at import; the base (`lowerCamelCase`) was also undefined.
    module_class = _UpperCAmelCase
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    """Mean cross-entropy averaged over the start-, end- and pooled-category heads.

    The original signature repeated one parameter name six times, which is a
    SyntaxError in Python; names restored from the pairing of the three
    cross-entropy calls below.
    """

    def cross_entropy(logits, labels, reduction=None):
        # one-hot the integer labels over the last logits axis
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype('f4')
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class _UpperCAmelCase:
    """Hyper-parameters for BigBird Natural-Questions fine-tuning.

    Field names restored from the `self.*` / `args.*` reads in the trainer
    methods below; the originals were all collapsed onto one mangled name.
    """

    model_id: str = 'google/bigbird-roberta-base'
    logging_steps: int = 30_00
    save_steps: int = 1_05_00

    block_size: int = 1_28
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3E-5
    init_lr: float = 0.0
    warmup_steps: int = 2_00_00
    weight_decay: float = 0.00_95

    save_dir: str = 'bigbird-roberta-natural-questions'
    base_dir: str = 'training-expt'
    tr_data_path: str = 'data/nq-training.jsonl'
    val_data_path: str = 'data/nq-validation.jsonl'

    def __post_init__(self):
        # `__post_init__` so this runs automatically on construction; the original
        # used an ordinary (never-called) method and passed an undefined name to
        # `exist_ok`.
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        # global batch size across all local devices
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class _UpperCAmelCase:
lowercase__ = 42
lowercase__ = 40_96 # no dynamic padding on TPUs
def __call__( self , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.collate_fn(__a)
_UpperCamelCase = jax.tree_util.tree_map(__a , __a)
return batch
def UpperCAmelCase ( self , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.fetch_inputs(features['''input_ids'''])
_UpperCamelCase = {
'''input_ids''': jnp.array(__a , dtype=jnp.intaa),
'''attention_mask''': jnp.array(__a , dtype=jnp.intaa),
'''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa),
'''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa),
'''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa),
}
return batch
def UpperCAmelCase ( self , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = [self._fetch_inputs(__a) for ids in input_ids]
return zip(*__a)
def UpperCAmelCase ( self , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = [1 for _ in range(len(__a))]
while len(__a) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    """Yield successive ``batch_size``-sized dict batches; the tail remainder is dropped.

    When ``seed`` is given the dataset is shuffled first (requires a HF-style
    ``.shuffle(seed=...)`` method on ``dataset``). Name restored from the
    surviving call sites in the trainer; the original signature repeated one
    parameter name, a SyntaxError.
    """
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name='batch')
def train_step(state, drp_rng, **model_inputs):
    """One pmap-ed optimisation step; returns (new state, mean loss metrics, next rng).

    Parameter names reconstructed -- the original signature repeated one name
    (SyntaxError). `model_inputs` carries input ids/masks plus the three label
    arrays popped below.
    """

    def loss_fn(params):
        start_labels = model_inputs.pop('start_labels')
        end_labels = model_inputs.pop('end_labels')
        pooled_labels = model_inputs.pop('pooled_labels')
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )

    # split so every step uses a fresh dropout rng
    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({'loss': loss}, axis_name='batch')
    grads = jax.lax.pmean(grads, 'batch')
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name='batch')
def val_step(state, **model_inputs):
    """Pmap-ed evaluation step; returns the mean loss metrics for one batch."""
    start_labels = model_inputs.pop('start_labels')
    end_labels = model_inputs.pop('end_labels')
    pooled_labels = model_inputs.pop('pooled_labels')
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({'loss': loss}, axis_name='batch')
    return metrics
class TrainState(train_state.TrainState):
    """Flax TrainState extended with the QA loss function.

    Name restored from the `TrainState.create(...)` call site below; the field
    is kept out of the jax pytree (`pytree_node=False`) since it is a callable.
    """

    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class _UpperCAmelCase:
    """Training-loop driver: wires args, collator, pmap-ed step fns and the logger.

    NOTE(review): field names/order and the attribute assignments in
    `create_state` were reconstructed from the surviving `self.*` reads --
    confirm against the (unseen) call site that constructs this trainer.
    """

    args: object          # the training Args dataclass defined above
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        """Build (and optionally restore from `ckpt_dir`) a replicated TrainState."""
        params = model.params
        # NOTE(review): the mangled source passed an undefined name as loss_fn;
        # the NQ loss defined earlier in this module is the intended callable.
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                'lr': args.lr,
                'init_lr': args.init_lr,
                'warmup_steps': args.warmup_steps,
                'num_train_steps': num_train_steps,
                'weight_decay': args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        """Run the epoch loop with periodic logging, evaluation and checkpointing."""
        args = self.args
        total = len(tr_dataset) // args.batch_size
        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f'Running EPOCH-{epoch}'):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics['loss'])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)
                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        'step': state_step.item(),
                        'eval_loss': eval_loss.item(),
                        'tr_loss': tr_loss,
                        'lr': lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}', state=state)

    def evaluate(self, state, dataset):
        """Return the average validation loss over `dataset`."""
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc='Evaluating ... '):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics['loss'])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        """Persist model params, optimiser state, args and collator under `save_dir`."""
        state = jax_utils.unreplicate(state)
        print(f'SAVING CHECKPOINT IN {save_dir}', end=' ... ')
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, 'opt_state.msgpack'), 'wb') as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, 'args.joblib'))
        joblib.dump(self.data_collator, os.path.join(save_dir, 'data_collator.joblib'))
        with open(os.path.join(save_dir, 'training_state.json'), 'w') as f:
            json.dump({'step': state.step.item()}, f)
        print('DONE')
def restore_checkpoint(save_dir, state):
    """Load params, optimiser state and training metadata saved by `save_checkpoint`.

    Name restored from the call in `create_state`; the original signature
    repeated one parameter name, a SyntaxError.

    Returns:
        (params, opt_state, step, args, data_collator)
    """
    print(f'RESTORING CHECKPOINT FROM {save_dir}', end=' ... ')
    with open(os.path.join(save_dir, 'flax_model.msgpack'), 'rb') as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, 'opt_state.msgpack'), 'rb') as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, 'args.joblib'))
    data_collator = joblib.load(os.path.join(save_dir, 'data_collator.joblib'))
    with open(os.path.join(save_dir, 'training_state.json'), 'r') as f:
        training_state = json.load(f)
    step = training_state['step']
    print('DONE')
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from `init_lr` to `lr`, then linear decay towards ~0.

    Name restored from the call in `build_tx`; the original signature repeated
    one parameter name, a SyntaxError.
    """
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimiser and its LR schedule.

    Biases and LayerNorm scales are excluded from weight decay via the mask.
    Name restored from the call in `create_state`; the original signature
    repeated one parameter name (SyntaxError) and the mask fn referenced an
    undefined `params`.
    """

    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # Flattened keys are tuples of path components; decay everything except
        # bias vectors and LayerNorm scale parameters.
        mask = {k: (k[-1] != 'bias' and k[-2:] != ('LayerNorm', 'scale')) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 704 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Load an original OpenAI GPT TF checkpoint and save it as a PyTorch model + config.

    Name and parameter names restored from the `__main__` call site and the
    body's own reads; the original signature repeated one parameter name, which
    is a SyntaxError in Python.
    """
    # Construct the model config: use defaults when no config file is given
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # The parser/args objects were bound to a throwaway name while the code
    # below referenced `parser`/`args`, raising NameError; bind them properly.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--openai_checkpoint_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the TensorFlow checkpoint path.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--openai_config_file""",
        default="""""",
        type=str,
        help=(
            """An optional config json file corresponding to the pre-trained OpenAI model. \n"""
            """This specifies the model architecture."""
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| 78 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Both module constants were previously bound to the same mangled name, so the
# logger was immediately clobbered by the archive map; give each its own name.
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class lowerCAmelCase_ ( PretrainedConfig ):
    """Configuration for the Informer time-series transformer.

    NOTE(review): the original base class was an undefined name; `PretrainedConfig`
    is the only config base imported by this module. The `__init__` signature
    repeated one parameter name ~30 times (a SyntaxError); real parameter names
    were restored from the body's surviving right-hand-side reads.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # width of the per-time-step feature vector fed to the transformer
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 66 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowercase_ = """src/transformers"""
# Matches is_xxx_available()
lowercase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowercase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowercase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowercase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase_ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase_ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowercase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowercase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowercase_ = re.compile(R"""^\s*else:""")
def find_backend(line):
    """Return the normalized backend name(s) guarded on `line`, or None.

    E.g. ``if not is_torch_available():`` -> ``"torch"``; multiple backends are
    sorted and joined with ``"_and_"``. Name restored from the call sites in
    the init parser below.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse an `__init__.py` and return its objects as two dicts keyed by backend.

    Returns ``(import_dict_objects, type_hint_objects)`` -- the objects listed
    in `_import_structure` and those imported under `TYPE_CHECKING` -- or
    ``None`` for a traditional init. Name restored from the `check_all_inits`
    call site; local bindings were reconstructed from their read sites (the
    mangled source assigned everything to one name and then read the originals).
    """
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('_import_structure = {'):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall('\[([^\]]+)\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(' ' * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('else')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the `_import_structure` half of an init with the TYPE_CHECKING half.

    Returns a list of human-readable error strings (empty when consistent).
    Name restored from the `check_all_inits` call site; the original signature
    repeated one parameter name, which is a SyntaxError in Python.
    """

    def find_duplicates(seq):
        # elements that occur more than once, in first-seen order
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = 'base imports' if key == 'none' else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the transformers source tree and validate every `__init__.py`.

    Raises ValueError listing all files whose two halves disagree. Name and
    local bindings restored from the `__main__` call site and the original
    helper names (`parse_init`, `analyze_results`) that survived the mangling.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append('\n'.join(errors))
    if len(failures) > 0:
        raise ValueError('\n\n'.join(failures))
def get_transformers_submodules():
    """Return the dotted names of all submodules found under PATH_TO_TRANSFORMERS.

    Name restored from the `check_submodules` call site.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_'):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('*.py'))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '.')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('.py', '').replace(os.path.sep, '.')
            if len(submodule.split('.')) == 1:
                submodules.append(submodule)
    return submodules
# Submodules deliberately absent from the main init; read by check_submodules()
# under this name (the original bound the list to a mangled throwaway name).
IGNORE_SUBMODULES = [
    """convert_pytorch_checkpoint_to_tf2""",
    """modeling_flax_pytorch_utils""",
]
def check_submodules():
    """Verify every submodule is registered in the main transformers init.

    Name restored from the `__main__` call site.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers', os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '\n'.join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            f"{list_of_modules}\n"
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
    # Validate every __init__.py, then the submodule registration, when run as a script.
    check_all_inits()
    check_submodules()
| 235 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowerCamelCase__ ( PipelineTool ):
    """Zero-shot text classification tool built on bart-large-mnli.

    NOTE(review): the original class inherited from its own (not-yet-defined)
    name, a NameError at import; `PipelineTool` is the base imported above.
    Attribute and method names were restored from the framework reads
    (`self.model`, `self.pre_processor`, `self._labels`, `self.entailment_id`)
    and from `config.id2label` (the source's `idalabel` does not exist on
    transformers configs).
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Locate the entailment label id in the model config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def encode(self, text, labels):
        """Tokenize (text, hypothesis) pairs, one per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors='pt', padding='max_length', )

    def decode(self, outputs):
        """Return the candidate label with the highest entailment score."""
        logits = outputs.logits
        # NOTE(review): column 2 is bart-large-mnli's entailment index; it looks
        # like `self.entailment_id` computed in setup() was intended here -- confirm.
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 90 |
from __future__ import annotations
def lowerCAmelCase__ ( lowerCamelCase_ : list[int]):
    """Return True when every element of the list occurs exactly once."""
    seen = set()
    for item in lowerCamelCase_:
        if item in seen:
            return False
        seen.add(item)
    return True
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 90 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE (_snake_case , unittest.TestCase ):
    """Tokenization tests for CANINE, a character-level tokenizer with no vocab file.

    NOTE(review): this block was machine-mangled — all methods originally shared one
    generated name (so only the last survived) and locals referenced an undefined
    ``_lowerCamelCase``.  Method, attribute and local names below are restored from
    the in-file uses (``self.canine_tokenizer``, ``self.get_tokenizer()``, …) and,
    best-effort, from the upstream CANINE test suite — confirm test_* names against
    ``tests/models/canine/test_tokenization_canine.py``.
    """

    # Read by the tokenizer-tester mixin (`_snake_case`).
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False  # CANINE ships no fast/rust tokenizer.

    def setUp(self):
        """Create a default tokenizer and persist it for the reload-based tests."""
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        """Pretrained tokenizer shared by the integration tests (downloaded once)."""
        return CanineTokenizer.from_pretrained('google/canine-s')

    def get_tokenizer(self, **kwargs):
        """Reload the tokenizer saved in ``setUp`` with extra ``kwargs`` applied."""
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        # NOTE(review): the assignment target of `1024` was lost in mangling;
        # upstream caps the unicode vocab size here — confirm the attribute name.
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        """Padding + special tokens produce the exact expected id sequence."""
        from transformers import BatchEncoding

        tokenizer = self.canine_tokenizer
        src_text = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
        # fmt: off
        expected_src_tokens = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors='pt')
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        """The tokenizer returns input_ids, attention_mask and token_type_ids."""
        tokenizer = self.canine_tokenizer
        src_text = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.''']
        batch = tokenizer(src_text, padding=True, return_tensors='pt')
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertIn('token_type_ids', batch)

    @require_torch
    def test_max_length_integration(self):
        """`max_length` padding/truncation yields fixed-width target ids."""
        tokenizer = self.canine_tokenizer
        tgt_text = [
            '''What\'s the weater?''',
            '''It\'s about 25 degrees.''',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors='pt')
        self.assertEqual(32, targets['input_ids'].shape[1])

    def test_save_and_load_tokenizer(self):
        """Round-trip save_pretrained/from_pretrained preserves encodings and config."""
        # Safety check on the default max length so the test below is meaningful.
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ''' He is very happy, UNwant\u00E9d,running'''
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ''' He is very happy, UNwant\u00E9d,running'''
                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        """A newly added cls_token encodes to exactly one id and is stripped on decode."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)
                # a special token for Canine can be defined as follows:
                special_token_id = 0xE005
                special_token = chr(special_token_id)
                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)
                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_encoded = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_encoded)
                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        """Special tokens added through either API tokenize to a single token."""
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]})
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        """An `AddedToken` special token survives save_pretrained/from_pretrained."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                new_token_id = 0xE006
                new_token = AddedToken(chr(new_token_id), lstrip=True)
                tokenizer.add_special_tokens({'additional_special_tokens': [new_token]})
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        """Tokens written into the saved JSON config files are honoured by from_pretrained."""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                # a special token for Canine can be defined as follows:
                new_token_id = 0xE006
                new_token_1 = chr(new_token_id)
                special_tokens_map['additional_special_tokens'] = [new_token_1]
                tokenizer_config['additional_special_tokens'] = [new_token_1]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])), )
                new_token_id_2 = 0xE007
                new_token_2 = chr(new_token_id_2)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0)
                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2])))

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        """decode() respects `spaces_between_special_tokens` (no specials here)."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text = '''hello world'''
                if self.space_between_special_tokens:
                    output_text = '''[CLS] hello world [SEP]'''
                else:
                    output_text = input_text
                encoded = tokenizer.encode(input_text, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output_text, output_text.lower()])

    def test_tokenizers_common_ids_setters(self):
        """Setting `<attr>_id` also updates the corresponding token attribute."""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    '''bos_token''',
                    '''eos_token''',
                    '''unk_token''',
                    '''sep_token''',
                    '''pad_token''',
                    '''cls_token''',
                    '''mask_token''',
                ]
                token_to_test_setters = '''a'''
                token_id_to_test_setters = ord(token_to_test_setters)
                for attr in attributes_list:
                    setattr(tokenizer, attr + '_id', None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), None)
                    setattr(tokenizer, attr + '_id', token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), token_id_to_test_setters)
                setattr(tokenizer, 'additional_special_tokens_ids', [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [])
                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, 'additional_special_tokens_ids', [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [additional_special_token_id])

    # The mixin tests below do not apply to CANINE (no vocab file, character-level
    # inputs).  NOTE(review): the original names were destroyed by the mangler; the
    # test_* names below are reconstructed best-effort from the upstream overrides.
    def test_add_tokens_tokenizer(self):
        pass

    def test_added_tokens_do_lower_case(self):
        pass

    def test_np_encode_plus_sent_to_model(self):
        pass

    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_clean_up_tokenization_spaces(self):
        pass
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Module-level call: presumably seeds RNGs / forces deterministic kernels so the
# pipeline tests below are reproducible — see diffusers.utils.testing_utils.
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
    """Fast (tiny, randomly initialised) sanity tests for the PNDM pipeline.

    NOTE(review): locals referenced an undefined ``_lowerCamelCase`` in the mangled
    original; names are restored from context (e.g. ``self.dummy_uncond_unet`` is
    read below, so the property must carry that name).
    """

    @property
    def dummy_uncond_unet(self):
        """A small UNet used as the pipeline backbone (seeded for determinism)."""
        torch.manual_seed(0)
        # NOTE(review): `UNetaDModel` looks like a mangled `UNet2DModel`, but it is
        # the name imported at the top of this file, so it is kept for consistency.
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        return model

    def test_inference(self):
        """Dict and tuple outputs agree and match a pinned slice of pixel values."""
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type='''numpy''').images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type='''numpy''', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
    """Slow integration test: PNDM sampling from the pretrained CIFAR-10 DDPM UNet.

    NOTE(review): locals referenced an undefined ``_lowerCamelCase`` in the mangled
    original; names are restored from context.
    """

    def test_inference_cifar10(self):
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type='''numpy''').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 445 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Delimiter inserted between sentences before computing CER (empty: none).
# NOTE(review): the mangled original bound this to `snake_case__` while the code
# below read `SENTENCE_DELIMITER`; the names are reconciled here.
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Fallback for jiwer < 2.3: flatten a list of sentences into characters,
        inserting `sentence_delimiter` between consecutive sentences."""

        def __init__(self, sentence_delimiter=" "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s):
            # One token per character.
            return list(s)

        def process_list(self, inp):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Only delimit *between* sentences, and only for a non-empty delimiter.
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
# NOTE(review): the mangled original assigned all three strings to the same name
# (`snake_case__`), each overwriting the previous one, while the metric class
# below reads `_CITATION` / `_DESCRIPTION` / `_KWARGS_DESCRIPTION`.
_CITATION = """\
@inproceedings{inproceedings,
  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
  year = {2004},
  month = {01},
  pages = {},
  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
    """Character Error Rate (CER) metric backed by jiwer.

    NOTE(review): the mangled original gave both methods the same generated name
    and referenced an undefined transform; the `datasets.Metric` API requires the
    `_info`/`_compute` hooks restored below, and the character-level transform is
    the module-level `cer_transform`.
    """

    def _info(self):
        """Metric metadata: input features, citation and reference links."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
                'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        """Return the corpus-level CER of `predictions` against `references`.

        jiwer reports the rate under the "wer" key; because `cer_transform`
        reduces text to characters, that value is in fact the CER.
        """
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        # Accumulate per-pair edit counts so each sentence pair is scored
        # independently, then divide once at the end.
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 618 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """Smoke tests for the `accelerate launch` CLI.

    `scripts/test_cli.py` (shipped with accelerate.test_utils) is launched under
    various configs; the user's real default config is moved aside for the
    duration of the class and restored afterwards.

    NOTE(review): the mangled original named every class attribute `A_` (each
    rebinding shadowed the previous) and this class `_a` — colliding with the TPU
    test class below; names are restored from the in-file reads
    (`self.test_file_path`, `cls.config_path`, …) and the upstream suite.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_cli.py"""])
    base_cmd = ["""accelerate""", """launch"""]
    config_folder = Path.home() / """.cache/huggingface/accelerate"""
    config_file = """default_config.yaml"""
    config_path = config_folder / config_file
    changed_path = config_folder / """_default_config.yaml"""
    test_config_path = Path("""tests/test_configs""")

    @classmethod
    def setUpClass(cls):
        # Park the user's default config so tests run from a clean slate.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the user's default config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_default_config(self):
        """Launch without a config file; add --multi_gpu when >1 CUDA device is present."""
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        """Every YAML config under tests/test_configs must be launchable."""
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(config), self.test_file_path], env=os.environ.copy())

    def test_accelerate_test(self):
        execute_subprocess_async(['accelerate', 'test'], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """Tests for `accelerate tpu-config`.  Every invocation passes `--debug`, so
    `run_command` returns the gcloud command line that *would* run; each test
    asserts on that captured string.

    NOTE(review): the mangled original used `return_stdout=_UpperCAmelCase` and
    asserted against the same undefined name; those are `True` and the captured
    `output` respectively.  Attribute names are restored from the in-file reads
    (`self.cmd`, `self.tpu_zone`, …) and the class is renamed from `_a`, which
    collided with the launcher test class above.
    """

    tpu_name = """test-tpu"""
    tpu_zone = """us-central1-a"""
    command = """ls"""
    cmd = ["""accelerate""", """tpu-config"""]
    # Expected prefix of the remote command (startup command of the default config).
    base_output = """cd /usr/share"""
    command_file = """tests/test_samples/test_command_file.sh"""
    gcloud = """Running gcloud compute tpus tpu-vm ssh"""

    def test_base(self):
        output = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'], return_stdout=True)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )
| 618 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
    """Builds tiny ViTMAE configs and random inputs for the TF model tests.

    NOTE(review): the mangled original declared every ``__init__`` parameter with
    the same name (invalid Python) and referenced an undefined
    ``lowerCAmelCase__`` throughout; names are restored from the attribute reads
    in this class and its callers (`prepare_config_and_inputs`, `get_config`,
    `create_and_check_model`, …).
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=3_0,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        """Random pixel values (+ optional labels) and a matching config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            # NOTE(review): these two flag values were lost in mangling; upstream
            # passes is_decoder=False and training=False below — confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class A__ ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__magic_name__ = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def a_ ( self ):
snake_case = TFViTMAEModelTester(self )
snake_case = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7 )
def a_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def a_ ( self ):
pass
def a_ ( self ):
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , tf.keras.layers.Layer ) )
def a_ ( self ):
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(lowerCAmelCase__ )
snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase__ )
def a_ ( self ):
np.random.seed(2 )
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = int((config.image_size // config.patch_size) ** 2 )
snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case = model_class(lowerCAmelCase__ )
snake_case = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
snake_case = copy.deepcopy(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
snake_case = model(**lowerCAmelCase__ , noise=lowerCAmelCase__ )
snake_case = outputs_dict[0].numpy()
snake_case = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def a_ ( self ):
np.random.seed(2 )
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = int((config.image_size // config.patch_size) ** 2 )
snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__snake_case ):
snake_case = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCAmelCase__ ):
snake_case = v.numpy()
else:
snake_case = np.array(lowerCAmelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case = model_class(lowerCAmelCase__ )
snake_case = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case = prepare_numpy_arrays(lowerCAmelCase__ )
snake_case = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
snake_case = model(**lowerCAmelCase__ , noise=lowerCAmelCase__ )
self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
np.random.seed(2 )
snake_case = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case = tf.constant(lowerCAmelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case = tf_noise
super().check_pt_tf_models(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def a_ ( self ):
np.random.seed(2 )
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCAmelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCAmelCase__ , lowerCAmelCase__ ),)
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCAmelCase__ , '''_keras_serializable''' , lowerCAmelCase__ )
}
snake_case = int((config.image_size // config.patch_size) ** 2 )
snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case = tf.convert_to_tensor(lowerCAmelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
snake_case = main_layer_class(lowerCAmelCase__ )
snake_case = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case = tf.keras.Model(lowerCAmelCase__ , outputs=main_layer(lowerCAmelCase__ ) )
snake_case = model(lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = os.path.join(lowerCAmelCase__ , '''keras_model.h5''' )
model.save(lowerCAmelCase__ )
snake_case = tf.keras.models.load_model(
lowerCAmelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCAmelCase__ , tf.keras.Model )
snake_case = model(lowerCAmelCase__ )
self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def a_ ( self ):
np.random.seed(2 )
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = int((config.image_size // config.patch_size) ** 2 )
snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case = model_class(lowerCAmelCase__ )
snake_case = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
if model_class.__name__ == "TFViTMAEModel":
snake_case = outputs.last_hidden_state.numpy()
snake_case = 0
else:
snake_case = outputs.logits.numpy()
snake_case = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ , saved_model=lowerCAmelCase__ )
snake_case = model_class.from_pretrained(lowerCAmelCase__ )
snake_case = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
if model_class.__name__ == "TFViTMAEModel":
snake_case = after_outputs['''last_hidden_state'''].numpy()
snake_case = 0
else:
snake_case = after_outputs['''logits'''].numpy()
snake_case = 0
snake_case = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-5 )
    def a_ ( self ):
        """Round-trip the model through `get_config` / `from_config` and compare outputs.

        NOTE(review): scrambled — locals collapse onto `snake_case`, `lowerCAmelCase__`
        is read without a binding, and `config`/`num_patches`/`model`/`new_model` have no
        visible definitions. Verify against the upstream TFViTMAE test before relying
        on the behavior documented here.
        """
        np.random.seed(2 )
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case = int((config.image_size // config.patch_size) ** 2 )
        # Fixed noise keeps ViTMAE's random masking deterministic between the models.
        snake_case = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            snake_case = model_class(lowerCAmelCase__ )
            snake_case = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
            snake_case = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
            snake_case = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(lowerCAmelCase__ )
            snake_case = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            snake_case = model_class.from_config(model.config )
            snake_case = new_model(lowerCAmelCase__ )  # Build model
            new_model.set_weights(model.get_weights() )
            snake_case = new_model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
            self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ )
    # Skipped: ViTMAE draws a fresh random mask + ids_restore on every forward pass,
    # so the generic determinism check cannot apply (test_save_load pins the noise).
    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.''' )
    def a_ ( self ):
        pass
    # Skipped for the same reason: random masking makes plain forward passes
    # nondeterministic; see test_save_load for the deterministic variant.
    @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
    def a_ ( self ):
        pass
    @slow
    def a_ ( self ):
        """Smoke-test that a pretrained checkpoint loads into TFViTMAEModel.

        NOTE(review): the checkpoint id '''google/vit-base-patch16-224''' looks odd for
        a ViTMAE model (upstream integration tests use facebook/vit-mae-base) — confirm.
        """
        snake_case = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(lowerCAmelCase__ )
def UpperCAmelCase__ ():
    """Load the COCO fixture image used by the vision integration tests.

    The original bound the opened image to a throwaway name and then returned
    the undefined name `image` (NameError); the image is now returned directly.
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
    """Integration test: run a real image through pretrained TFViTMAEForPreTraining.

    NOTE(review): this block looks machine-scrambled — every local is assigned to the
    single name `snake_case`, `lowerCAmelCase__` is read unbound, both methods share the
    name `a_` (the second definition shadows the first), and
    `self.default_image_processor` has no matching attribute. Compare with the upstream
    TFViTMAEModelIntegrationTest before trusting details.
    """
    @cached_property
    def a_ ( self ):
        # Image processor for facebook/vit-mae-base; None when vision deps are absent.
        return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
    @slow
    def a_ ( self ):
        """Verify the logits shape and a 3x3 slice of expected values on the fixture image."""
        np.random.seed(2 )
        snake_case = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
        snake_case = self.default_image_processor
        snake_case = prepare_img()
        snake_case = image_processor(images=lowerCAmelCase__ , return_tensors='''tf''' )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        snake_case = ViTMAEConfig()
        snake_case = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        snake_case = np.random.uniform(size=(1, num_patches) )
        # forward pass
        snake_case = model(**lowerCAmelCase__ , noise=lowerCAmelCase__ )
        # verify the logits
        snake_case = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
        snake_case = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
| 550 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def a__(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize one text line to ``max_length`` using the requested padding side.

    NOTE(review): the original signature repeated a single parameter name six times
    (a SyntaxError) and dropped ``padding_side`` into a throwaway local instead of
    applying it to the tokenizer; both are fixed following the evident upstream
    layout (``truncation=True`` / ``add_special_tokens=True`` reconstructed — confirm).

    Args:
        tokenizer: a HF tokenizer; Bart-style tokenizers get ``add_prefix_space``.
        line: the raw text to encode.
        max_length: target length for truncation/padding.
        padding_side: "left" or "right", applied to the tokenizer before encoding.
        pad_to_max_length: pad to ``max_length`` when True, no padding otherwise.
        return_tensors: framework flag passed through to the tokenizer.
    """
    # Bart tokenizers need add_prefix_space unless the line already starts with one.
    extra_kw = (
        {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    )
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def a__(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are entirely padding across the whole batch.

    NOTE(review): the original signature repeated one parameter name (SyntaxError);
    names follow the evident upstream ``trim_batch`` helper.

    Args:
        input_ids: (batch, seq) tensor of token ids.
        pad_token_id: id that marks padding.
        attention_mask: optional (batch, seq) mask trimmed with the same columns.

    Returns:
        The trimmed ids, or a ``(ids, mask)`` tuple when a mask is given.
    """
    # Keep a column iff at least one row holds a non-pad token in it.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( Dataset ):  # NOTE(review): base was the undefined name `__magic_name__`; `Dataset` (imported above) is the evident intent
    """Line-aligned seq2seq dataset.

    Reads parallel text from ``{data_dir}/{type_path}.source`` and
    ``{data_dir}/{type_path}.target`` (one example per line) and tokenizes each
    pair lazily on access. Works with a plain tokenizer or a RagTokenizer (which
    carries a separate question encoder and generator).
    """

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        """Build the dataset from the `.source`/`.target` files under *data_dir*.

        NOTE(review): the original signature repeated one parameter name nine times
        (a SyntaxError); names here follow the evident upstream layout — confirm
        against call sites.
        """
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + '''.source''' )
        self.tgt_file = Path(data_dir).joinpath(type_path + '''.target''' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            # Optionally restrict the dataset to the first n_obs examples.
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens )

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        """Return the tokenized source/target pair at *index*."""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('''\n''' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('''\n''' )
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5 (imported here as `TaTokenizer`).
        if isinstance(self.tokenizer , TaTokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right; RAG has distinct source/target tokenizers.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        # NOTE(review): `encode_line` / `trim_batch` are the module helpers defined
        # above, which this file renamed to `a__` — confirm these names resolve.
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , '''right''' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , '''right''' )
        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character count of every line in *data_file*.

        NOTE(review): the original declared this as a second `snake_case__` (shadowed
        by the collate method) while `__init__` calls `self.get_char_lens`, and it
        measured `len()` of the file path instead of each line; both fixed.
        """
        return [len(x ) for x in Path(data_file ).open().readlines()]

    def snake_case__(self, batch) -> Dict[str, torch.Tensor]:
        """Collate a list of examples: stack tensors and trim all-padding columns."""
        input_ids = torch.stack([x['''input_ids'''] for x in batch] )
        masks = torch.stack([x['''attention_mask'''] for x in batch] )
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        return {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
# Module-level logger shared by the save/load and metric helpers below.
lowercase__ : Union[str, Any] = getLogger(__name__)
def a__ ( lowercase : List[List] ) -> Optional[int]:
    """Flatten one level of nesting: a list of lists becomes a single flat list."""
    return [element for sublist in lowercase for element in sublist]
def a__(folder_path: str) -> None:
    """Snapshot the current git state into ``{folder_path}/git_log.json``.

    The original passed the *folder path* as the content to save instead of the
    collected repo info dict; fixed to save the info.

    NOTE(review): `get_git_info` / `save_json` are the helpers defined nearby,
    which this file renamed to `a__` — confirm these names resolve.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, '''git_log.json''' ) )
def a__(content, path, indent: int = 4, **json_dump_kwargs) -> None:
    """Serialize *content* as JSON to *path* (pretty-printed by default).

    NOTE(review): the original signature repeated one parameter name three times
    (a SyntaxError); this layout matches how `save_git_info` calls it.

    Args:
        content: any JSON-serializable object.
        path: destination file path.
        indent: indentation width passed to ``json.dump``.
        **json_dump_kwargs: forwarded verbatim to ``json.dump``.
    """
    with open(path, '''w''' ) as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs )
def a__ ( lowercase : Union[str, Any] ) -> Optional[int]:
    """Read and deserialize the JSON document stored at *lowercase*."""
    return json.loads(Path(lowercase).read_text())
def a__() -> Dict:
    """Collect repo id, commit sha, branch and hostname for experiment logging.

    The original passed the unbound name `lowercase` as ``search_parent_directories``
    (a NameError at runtime); upstream passes ``True`` so the repo is found from any
    working subdirectory.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def a__(fn: Callable, iterable: Iterable) -> List:
    """Eager ``map``: apply *fn* to every element and return a list.

    NOTE(review): the original signature repeated one parameter name (SyntaxError);
    distinct names restored.
    """
    return list(map(fn, iterable ) )
def a__(obj, path) -> None:
    """Pickle *obj* to the file at *path*.

    NOTE(review): the original signature repeated one parameter name (SyntaxError);
    distinct names restored.
    """
    with open(path, '''wb''' ) as f:
        return pickle.dump(obj, f )
def a__(s) -> str:
    """Normalize an answer string SQuAD-style: lowercase, strip punctuation,
    drop the articles a/an/the, and collapse whitespace.

    The original's inner helpers took a parameter named `lowercase` while their
    bodies read the undefined name `text` (NameError); parameters renamed to match.
    """
    def remove_articles(text):
        return re.sub(r'''\b(a|an|the)\b''', ''' ''', text )

    def white_space_fix(text):
        return " ".join(text.split() )

    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def a__(prediction, ground_truth) -> float:
    """Token-level F1 between a prediction and a reference, after normalization.

    NOTE(review): the original signature repeated one parameter name (SyntaxError);
    also calls `normalize_answer`, which this file defines under the name `a__` —
    confirm the name resolves.
    """
    pred_tokens = normalize_answer(prediction ).split()
    gold_tokens = normalize_answer(ground_truth ).split()
    # Multiset intersection counts each shared token at most min(pred, gold) times.
    common = Counter(pred_tokens ) & Counter(gold_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens )
    recall = 1.0 * num_same / len(gold_tokens )
    return (2 * precision * recall) / (precision + recall)
def a__(prediction, ground_truth) -> bool:
    """Exact match between prediction and reference after SQuAD-style normalization.

    NOTE(review): the original signature repeated one parameter name (SyntaxError);
    also calls `normalize_answer`, which this file defines under the name `a__` —
    confirm the name resolves.
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def a__(output_lns, reference_lns) -> Dict:
    """Corpus-level exact-match score: ``{"em": mean exact match in [0, 1]}``.

    NOTE(review): the original signature repeated one parameter name (SyntaxError);
    also calls `exact_match_score`, defined in this file under the name `a__` —
    confirm the name resolves.
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns, reference_lns ):
        em += exact_match_score(hypo, pred )
    if len(output_lns ) > 0:
        # Average over the corpus; an empty corpus keeps em at 0.
        em /= len(output_lns )
    return {"em": em}
def a__(model_prefix: str) -> bool:
    """True when the model type string names a RAG model.

    The original's parameter was named `lowercase` while the body read the unbound
    name `model_prefix` (NameError); the parameter is renamed to match the body.
    """
    return model_prefix.startswith('''rag''' )
def a__(extra_params, hparams, config):
    """Move each truthy hparam named in *extra_params* onto *config*, removing it
    from *hparams*.

    T5-style configs call dropout ``dropout_rate``; that alias is consulted when
    the plain attribute name is missing on the config. Params the config knows
    under neither name are logged and dropped from hparams.

    NOTE(review): the original signature repeated one parameter name three times
    (a SyntaxError) and assigned the string "dropout_rate" to a throwaway local
    instead of registering it as the alias for "dropout"; both fixed.

    Returns:
        The (hparams, config) pair, mutated in place.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams, p, None ):
            if not hasattr(config, p ) and not hasattr(config, equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams, p )
                continue
            set_p = p if hasattr(config, p ) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p ) )
            delattr(hparams, p )
    return hparams, config
| 98 | 0 |
def lowerCAmelCase(power: int = 1_000) -> int:
    """Return the sum of the decimal digits of ``2**power`` (Project Euler 16).

    The original bound every value to one placeholder name while reading the
    unbound names ``n`` and ``r`` (NameError), and its tuple assignment wrote both
    results into the same variable; proper names restored.
    """
    n = 2**power
    r = 0
    while n:
        # Peel off the last digit and accumulate it.
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    # NOTE(review): the original called the undefined name `solution` here.
    print(lowerCAmelCase(int(str(input()).strip())))
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
# Silence torch's "call optimizer.step() before lr_scheduler.step()" UserWarning;
# the wrapper below deliberately controls that ordering itself.
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class snake_case__:
    """Scheduler wrapper that only steps when its optimizers actually stepped.

    During gradient accumulation, or when a step was skipped (e.g. AMP found
    inf/nan gradients), the wrapped scheduler must not advance; in multi-process
    training without split batches it must advance once per process.

    NOTE(review): the original class was machine-scrambled — ``__init__`` and the
    step method repeated a single parameter name (a SyntaxError), the bodies read
    unbound names, and all six methods were named ``lowercase_`` so only the last
    definition survived. Signatures and method names restored per the evident
    upstream ``AcceleratedScheduler``; confirm call sites.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        # Accept a single optimizer or a list/tuple of them.
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        """Step the wrapped scheduler, respecting accumulation/skip semantics."""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, '''total_steps''' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs )
                else:
                    self.scheduler.step(*args, **kwargs )

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict )

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.