| code (string, 81-54k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
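
# Usage sketch (added, illustrative): the template maps a custom text column onto
# the canonical "text" feature that the task expects.
def _column_mapping_example() -> None:
    template = LanguageModeling(text_column="content")
    assert template.column_mapping == {"content": "text"}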
| 710
|
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the base**power values whose digit count equals the exponent."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
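
# Sanity check (added): 81 == 9**2 is a 2-digit square, so it is counted for
# power == 2. The total of 49 is the widely quoted answer to this counting
# problem (Project Euler 63); stated here as an assumption, not from the source.
def _check_solution() -> None:
    assert len(str(9**2)) == 2
    assert solution() == 49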
| 644
| 0
|
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
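
# Usage sketch (added): deleting a word keeps nodes that other words still need.
def _usage_sketch() -> None:
    root = TrieNode()
    root.insert_many(["cat", "car", "cart"])
    assert root.find("car") and not root.find("ca")  # "ca" is only a prefix
    root.delete("car")
    assert not root.find("car") and root.find("cart")  # shared prefix survives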
| 711
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 644
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering / sequence
        # classification and the other tasks
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # A dummy image is used, so OCR should not be applied
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
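
# Usage sketch (added): default construction should mirror the layoutlmv3-base
# hyperparameters restored above.
def _config_example() -> None:
    config = LayoutLMv3Config()
    assert config.model_type == "layoutlmv3"
    assert config.max_2d_position_embeddings == 1024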
| 712
|
from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        """Return a string of all the Nodes using in order traversal"""
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        """Insert a new node in Binary Search Tree with value label"""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        """We go deep on the right branch"""
        if node is None:
            if self.root is None:
                return None
            node = self.root

        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        """We go deep on the left branch"""
        if node is None:
            node = self.root
            if self.root is None:
                return None

        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        """
        This function traverses the tree. You can pass a function to traverse
        the tree as needed by client code
        """
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        """Perform an inorder traversal and append values of the nodes to a list arr"""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        """Return the kth smallest element in a binary search tree"""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    """postOrder (left, right, self)"""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
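
# Usage sketch (added): exercises find_kth_smallest, which the demo above skips.
def _kth_smallest_example() -> None:
    tree = BinarySearchTree()
    tree.insert(8, 3, 6, 1, 10)
    assert tree.find_kth_smallest(2, tree.root) == 3  # sorted order: 1, 3, 6, 8, 10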
| 644
| 0
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
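
# Usage sketch (added): the paths below are placeholders for illustration only;
# point them at a real TF checkpoint, config, and output location.
def _example_invocation() -> None:
    convert_rembert_tf_checkpoint_to_pytorch(
        "/path/to/model.ckpt", "/path/to/config.json", "/path/to/pytorch_model.bin"
    )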
| 713
|
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 644
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 714
|
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 644
| 0
|
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit elementwise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
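
# Usage sketch (added): relu works elementwise on any array-like input.
def _relu_example() -> None:
    out = relu([-3.5, 0.0, 2.5])
    assert (out == np.array([0.0, 0.0, 2.5])).all()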
| 715
|
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
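
# Usage sketch (added): each entry maps a package name to the version pin that
# setup tooling should emit for it.
def _pin_example() -> None:
    assert deps["torch"] == "torch>=1.4"
    assert deps["jax"] == "jax>=0.2.8,!=0.3.2"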
| 644
| 0
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 716
|
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 644
| 0
|
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 717
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
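
# Usage sketch (added, illustrative): PipelineTool subclasses are callable, so a
# transcription looks roughly like the helper below; the expected `audio` format
# (e.g. a raw waveform array) depends on the processor, so treat this as a sketch.
def _transcribe_example(audio) -> str:
    transcriber = SpeechToTextTool()  # downloads openai/whisper-base on first use
    return transcriber(audio)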
| 644
| 0
|
class CircularQueue:
    """Circular FIFO queue with a fixed capacity."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the first element of the queue, or False if it is empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
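
# Usage sketch (added): enqueue returns self, so calls can be chained, and the
# rear index wraps around once elements are dequeued.
def _queue_example() -> None:
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b").enqueue("c")
    assert queue.dequeue() == "a"
    queue.enqueue("d")  # rear wraps into the freed slot
    assert len(queue) == 3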
| 718
|
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
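
# Worked example (added): trace of gcd_by_iterative(24, 40):
# x, y = 24, 40 -> 40, 24 -> 24, 16 -> 16, 8 -> 8, 0, so the result is 8.
def _gcd_example() -> None:
    assert greatest_common_divisor(24, 40) == 8
    assert gcd_by_iterative(24, 40) == 8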
| 644
| 0
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
| 719
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
    def __init__( self : int , parent : Optional[Any] , batch_size : Any=13 , seq_length : Any=7 , is_training : Union[str, Any]=True , use_input_mask : Union[str, Any]=True , use_token_type_ids : str=True , use_labels : Optional[int]=True , vocab_size : List[Any]=99 , hidden_size : int=24 , num_hidden_layers : Optional[int]=2 , num_attention_heads : Tuple=6 , intermediate_size : Union[str, Any]=37 , hidden_act : Optional[Any]="gelu" , hidden_dropout_prob : Any=0.1 , attention_probs_dropout_prob : str=0.1 , max_position_embeddings : Tuple=5_12 , type_vocab_size : Union[str, Any]=16 , type_sequence_label_size : Tuple=2 , initializer_range : Tuple=0.0_2 , num_labels : Optional[Any]=3 , scope : Optional[int]=None , range_bbox : Any=10_00 , ) -> str:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def UpperCAmelCase__ ( self : List[Any] , config : str , input_ids : Optional[Any] , bbox : int , token_type_ids : Optional[Any] , input_mask : int , sequence_labels : Optional[Any] , token_labels : int , ) -> Optional[int]:
        """simple docstring"""
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def UpperCAmelCase__ ( self : int , config : Optional[Any] , input_ids : List[str] , bbox : Any , token_type_ids : Optional[int] , input_mask : str , sequence_labels : Optional[int] , token_labels : List[Any] , ) -> Optional[Any]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def UpperCAmelCase__ ( self : Any , config : Optional[Any] , input_ids : Dict , bbox : Any , token_type_ids : Optional[int] , input_mask : int , sequence_labels : Tuple , token_labels : Any , ) -> Optional[Any]:
        """simple docstring"""
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a : Any = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Union[str, Any] = False
__a : int = False
    def UpperCAmelCase__ ( self : List[str] , pipeline_test_casse_name : Dict , config_class : List[Any] , model_architecture : Optional[int] , tokenizer_name : Optional[Any] , processor_name : int ) -> str:
        """simple docstring"""
        return True
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
        model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 7_68] )
        expected_slice = torch.tensor(
            [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=torch_device , )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
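# Note on the bbox fixup in LiltModelTester above: LiLT expects each box as
# (x0, y0, x1, y1) with x0 <= x1 and y0 <= y1, which is why randomly drawn
# coordinates are swapped whenever they come out reversed. A minimal sketch of
# the same invariant (assumed semantics, not an official API):
#
#     x0, y0, x1, y1 = box
#     if x1 < x0: x0, x1 = x1, x0
#     if y1 < y0: y0, y1 = y1, y0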
| 644
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
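# With `_import_structure` above, `_LazyModule` replaces this module in
# `sys.modules` and defers each submodule import until one of its names is
# first accessed, e.g.:
#
#     import transformers
#     transformers.XLNetModel  # modeling_xlnet (and torch) are imported here,
#                              # not at `import transformers` time.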
| 720
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : int = "▁"
snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
snake_case_ : int = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
snake_case_ : Optional[Any] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
snake_case_ : Dict = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
snake_case_ : Any = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __a (lowerCamelCase ):
__a : List[str] = ["input_ids"]
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_INIT_CONFIGURATION
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = RESOURCE_FILES_NAMES
    def __init__( self : Union[str, Any] , sentencepiece_model_ckpt : Dict , vocab_file : int=None , do_lower_case : str=False , encoding : int="utf8" , unk_token : Optional[int]="[UNK]" , sep_token : Dict="[SEP]" , pad_token : List[Any]="[PAD]" , cls_token : str="[CLS]" , mask_token : Optional[int]="[MASK]" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Union[str, Any] , ) -> None:
        """simple docstring"""
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )
        # to mimic the behavior of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def UpperCAmelCase__ ( self : List[str] , text : Any ) -> Any:
        """simple docstring"""
        if text is None:
            return None
        split_tokens = self.tokenize(text )
        normalized_text , char_mapping = '''''', []
        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize('''NFKC''' , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
        text , token_mapping , offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
@property
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
return len(self.vocab )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__( self : str ) -> Any:
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self : str , d : Any ) -> Dict:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )
    def UpperCAmelCase__ ( self : Optional[int] , text : Any ) -> List[str]:
        """simple docstring"""
        return "".join(self.SP_CHAR_MAPPING.get(c , c ) for c in text )
    def UpperCAmelCase__ ( self : List[str] , text : Tuple , enable_sampling : Any=False , nbest_size : List[str]=64 , alpha : List[str]=0.1 ) -> List[str]:
        """simple docstring"""
        if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('''alpha''' ) is not None:
            alpha = self.sp_model_kwargs.get('''alpha''' )
        if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
            nbest_size = self.sp_model_kwargs.get('''nbest_size''' )
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text )
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
        new_pieces = []
        for pi, piece in enumerate(pieces ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(piece )
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
            if len(piece ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def UpperCAmelCase__ ( self : List[Any] , tokens : Optional[int] ) -> List[str]:
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def UpperCAmelCase__ ( self : List[Any] , ids : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids )
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.reverse_vocab.get(__magic_name__ , self.unk_token )
    def UpperCAmelCase__ ( self : Tuple , token_ids_a : Any , token_ids_b : Union[str, Any]=None ) -> Any:
        """simple docstring"""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_b + _sep
    def UpperCAmelCase__ ( self : Any , offset_mapping_a : Optional[Any] , offset_mapping_b : List[str]=None ) -> int:
        """simple docstring"""
        if offset_mapping_b is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_b + [(0, 0)]
    def UpperCAmelCase__ ( self : Dict , token_ids_a : Optional[Any] , token_ids_b : List[str]=None , already_has_special_tokens : Optional[Any]=False ) -> Optional[int]:
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def UpperCAmelCase__ ( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        # called when `add_special_tokens` is True, so it must align with the `build_inputs_with_special_tokens` method
        if token_ids_b is None:
            # [CLS] X [SEP]
            return (len(token_ids_a ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_a ) + 1) + [1] * (len(token_ids_b ) + 3)
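    # Worked example for the method above (made-up ids, a sketch only): for
    # token_ids_a = [5, 6] and token_ids_b = [7] the model input is
    # [CLS] 5 6 [SEP] [SEP] 7 [SEP]; zeros cover [CLS] plus sequence A
    # (len + 1 positions) and ones cover both [SEP]s, sequence B and the final
    # [SEP] (len + 3 positions), giving [0, 0, 0, 1, 1, 1, 1].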
    def UpperCAmelCase__ ( self : Dict , char : str ) -> Tuple:
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def UpperCAmelCase__ ( self : int , char : Optional[int] ) -> str:
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def UpperCAmelCase__ ( self : int , char : Optional[Any] ) -> Dict:
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def UpperCAmelCase__ ( self : Tuple , char : Any ) -> Union[str, Any]:
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def UpperCAmelCase__ ( self : Union[str, Any] , filepath : Tuple ) -> Any:
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath , '''r''' , encoding='''utf-8''' ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip('''\n''' )
                token_to_idx[token] = int(index )
        return token_to_idx
    def UpperCAmelCase__ ( self : Dict , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(token + '''\n''' )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , '''sentencepiece.bpe.model''' )
        with open(tokenizer_model_file , '''wb''' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
| 644
| 0
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> bool:
    """
    Return True when every element of the list is distinct.

    >>> lowerCamelCase_([1, 2, 3])
    True
    >>> lowerCamelCase_([1, 2, 2])
    False
    """
    return len(set(SCREAMING_SNAKE_CASE__ ) ) == len(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> str:
    if SCREAMING_SNAKE_CASE__ > 0:
        raise ValueError('''input must be a negative integer''' )
    binary_number_length = len(bin(SCREAMING_SNAKE_CASE__ )[3:] )
    twos_complement_number = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if SCREAMING_SNAKE_CASE__ < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
snake_case_ : Dict = None
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
snake_case_ : Dict = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
snake_case_ : List[Any] = {
"facebook/nllb-large-en-ro": 10_24,
"facebook/nllb-200-distilled-600M": 10_24,
}
# fmt: off
snake_case_ : Dict = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __a (__a ):
__a : List[Any] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : List[str] = PRETRAINED_VOCAB_FILES_MAP
__a : List[Any] = ["input_ids", "attention_mask"]
__a : Tuple = NllbTokenizer
__a : str = []
__a : Any = []
    def __init__( self : Union[str, Any] , vocab_file : Tuple=None , tokenizer_file : Dict=None , bos_token : Any="<s>" , eos_token : Any="</s>" , sep_token : List[Any]="</s>" , cls_token : Union[str, Any]="<s>" , unk_token : Optional[Any]="<unk>" , pad_token : Optional[int]="<pad>" , mask_token : str="<mask>" , src_lang : str=None , tgt_lang : List[str]=None , additional_special_tokens : Optional[Any]=None , legacy_behaviour : List[str]=False , **kwargs : int , ) -> List[Any]:
        """simple docstring"""
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else '''eng_Latn'''
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
    def UpperCAmelCase__ ( self : int , new_src_lang : Dict ) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def UpperCAmelCase__ ( self : List[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def UpperCAmelCase__ ( self : Optional[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def UpperCAmelCase__ ( self : str , raw_inputs : Optional[int] , return_tensors : Dict , src_lang : Optional[Any] , tgt_lang : List[str] , **extra_kwargs : List[Any] ) -> Union[str, Any]:
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def UpperCAmelCase__ ( self : Optional[Any] , src_texts : str , src_lang : Tuple = "eng_Latn" , tgt_texts : Any = None , tgt_lang : Optional[int] = "fra_Latn" , **kwargs : Optional[int] , ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def UpperCAmelCase__ ( self : int , src_lang : Tuple ) -> None:
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def UpperCAmelCase__ ( self : Any , lang : str ) -> None:
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def UpperCAmelCase__ ( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
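# A minimal usage sketch for the fast tokenizer above (checkpoint name taken
# from PRETRAINED_VOCAB_FILES_MAP; exact output ids depend on the vocabulary):
#
#     tok = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tok("Hello", text_target="Bonjour", return_tensors="pt")
#
# With legacy_behaviour=False the source language code is prepended and </s>
# appended to the inputs, mirroring set_src_lang_special_tokens above.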
| 700
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        # For consistency across the different places DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers; it is also initialized only from integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1, 2, 3] fulfill the constraint?
        # It would mean that generating [1, 2] fulfills it, yet generation may still be in the middle of
        # fulfilling [1, 2, 3, 4]. If we decided that [1, 2, 3] does fulfill the constraint, the algorithm
        # could never reach [1, 2, 3, 4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here
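    # Illustration of the ambiguity rejected above: once [1, 2] has been
    # generated the constraint would already count as fulfilled (first branch),
    # even though generation may still be heading for [1, 2, 3, 4]; nested
    # branches like [[1, 2], [1, 2, 3, 4]] are therefore refused at construction.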
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
        stepped , completed , reset = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
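# Summary of the `dc.update(token_id)` contract exercised above: it returns a
# (stepped, completed, reset) triple. `stepped` is True when the token advanced
# some branch of the trie, `completed` becomes True once an entire branch has
# been matched, and `reset` is True when the token broke the current partial
# match and progress was rolled back.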
| 644
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Dict = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class __a (lowerCamelCase ):
__a : Optional[int] = 'roberta-prelayernorm'
    def __init__( self : List[Any] , vocab_size : int=5_02_65 , hidden_size : Any=7_68 , num_hidden_layers : str=12 , num_attention_heads : List[str]=12 , intermediate_size : Any=30_72 , hidden_act : List[Any]="gelu" , hidden_dropout_prob : Tuple=0.1 , attention_probs_dropout_prob : int=0.1 , max_position_embeddings : Dict=5_12 , type_vocab_size : List[Any]=2 , initializer_range : str=0.0_2 , layer_norm_eps : str=1E-12 , pad_token_id : List[Any]=1 , bos_token_id : Optional[int]=0 , eos_token_id : Optional[Any]=2 , position_embedding_type : Any="absolute" , use_cache : Optional[Any]=True , classifier_dropout : Any=None , **kwargs : Union[str, Any] , ) -> List[Any]:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __a (lowerCamelCase ):
@property
def UpperCAmelCase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
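# Note on the OnnxConfig subclass above: its `inputs` property declares the
# dynamic axes used at ONNX export time. For the default task it yields
# {"input_ids": {0: "batch", 1: "sequence"}, "attention_mask": {0: "batch", 1: "sequence"}},
# and it swaps in a (batch, choice, sequence) layout for multiple choice.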
| 701
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[0]
    # If you're using some other dataset, input the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
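    # Each sample is a sliding window: `look_back` consecutive values as input
    # and the following `forward_days` values as target, e.g. with look_back=10
    # and forward_days=5, sample i covers rows i..i+9 as x and rows i+10..i+14 as y.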
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(1_28, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 644
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Optional[int] = logging.get_logger(__name__)
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Union[str, Any]=False, SCREAMING_SNAKE_CASE__ : Any=False, SCREAMING_SNAKE_CASE__ : str=False ) -> Optional[int]:
UpperCAmelCase_ : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
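# Example of one (old, new) pair produced above, for layer i = 0:
#     ("transformer.blocks.0.norm1.weight",
#      "vilt.encoder.layer.0.layernorm_before.weight")
# The pairs are later consumed by `rename_key`, which pops the old key from the
# state dict and re-inserts its value under the new key.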
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
for i in range(config.num_hidden_layers ):
UpperCAmelCase_ : int = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ : Optional[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ : List[Any] = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : List[Any] = in_proj_bias[-config.hidden_size :]
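# Shape bookkeeping for the split above: timm stores the attention projection
# as one fused matrix, so for hidden_size = H, `in_proj_weight` has shape
# (3H, H) and `in_proj_bias` shape (3H,); rows [0:H] become the query
# projection, rows [H:2H] the key projection and rows [2H:3H] the value
# projection.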
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(lowercase__, lowercase__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = dct.pop(lowercase__ )
UpperCAmelCase_ : Any = val
@torch.no_grad()
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any:
UpperCAmelCase_ : str = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=lowercase__ )
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : List[str] = False
if "vqa" in checkpoint_url:
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : Any = 3129
UpperCAmelCase_ : Dict = '''huggingface/label-files'''
UpperCAmelCase_ : Tuple = '''vqa2-id2label.json'''
UpperCAmelCase_ : Optional[Any] = json.load(open(hf_hub_download(lowercase__, lowercase__, repo_type='''dataset''' ), '''r''' ) )
UpperCAmelCase_ : str = {int(lowercase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Tuple = idalabel
UpperCAmelCase_ : str = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Tuple = ViltForQuestionAnswering(lowercase__ )
elif "nlvr" in checkpoint_url:
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : str = 2
UpperCAmelCase_ : Optional[Any] = {0: '''False''', 1: '''True'''}
UpperCAmelCase_ : int = {v: k for k, v in config.idalabel.items()}
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Optional[Any] = ViltForImagesAndTextClassification(lowercase__ )
elif "irtr" in checkpoint_url:
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Optional[int] = ViltForImageAndTextRetrieval(lowercase__ )
elif "mlm_itm" in checkpoint_url:
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Dict = ViltForMaskedLM(lowercase__ )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ : str = torch.hub.load_state_dict_from_url(lowercase__, map_location='''cpu''' )['''state_dict''']
UpperCAmelCase_ : str = create_rename_keys(lowercase__, lowercase__, lowercase__, lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__, lowercase__, lowercase__ )
read_in_q_k_v(lowercase__, lowercase__ )
if mlm_model or irtr_model:
UpperCAmelCase_ : int = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(lowercase__, lowercase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(lowercase__, strict=lowercase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowercase__ )
# Define processor
UpperCAmelCase_ : List[str] = ViltImageProcessor(size=384 )
UpperCAmelCase_ : List[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
UpperCAmelCase_ : List[str] = ViltProcessor(lowercase__, lowercase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
UpperCAmelCase_ : str = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=lowercase__ ).raw )
UpperCAmelCase_ : Tuple = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=lowercase__ ).raw )
UpperCAmelCase_ : Optional[Any] = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
UpperCAmelCase_ : str = processor(lowercase__, lowercase__, return_tensors='''pt''' )
UpperCAmelCase_ : Tuple = processor(lowercase__, lowercase__, return_tensors='''pt''' )
UpperCAmelCase_ : List[str] = model(
input_ids=encoding_a.input_ids, pixel_values=encoding_a.pixel_values, pixel_values_a=encoding_a.pixel_values, )
else:
UpperCAmelCase_ : int = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''', stream=lowercase__ ).raw )
if mlm_model:
UpperCAmelCase_ : Union[str, Any] = '''a bunch of [MASK] laying on a [MASK].'''
else:
UpperCAmelCase_ : Union[str, Any] = '''How many cats are there?'''
UpperCAmelCase_ : Union[str, Any] = processor(lowercase__, lowercase__, return_tensors='''pt''' )
UpperCAmelCase_ : str = model(**lowercase__ )
# Verify outputs
if mlm_model:
UpperCAmelCase_ : List[Any] = torch.Size([1, 11, 30522] )
UpperCAmelCase_ : Tuple = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3], lowercase__, atol=1E-4 )
# verify masked token prediction equals "cats"
UpperCAmelCase_ : Optional[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
UpperCAmelCase_ : Dict = torch.Size([1, 3129] )
UpperCAmelCase_ : str = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
assert torch.allclose(outputs.logits[0, :3], lowercase__, atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
UpperCAmelCase_ : str = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
UpperCAmelCase_ : str = torch.Size([1, 2] )
UpperCAmelCase_ : int = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3], lowercase__, atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
snake_case_ : List[str] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 702
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
snake_case_ : Dict = "CompVis/stable-diffusion-v1-2"
snake_case_ : Any = "CompVis/stable-diffusion-v1-3"
snake_case_ : str = "CompVis/stable-diffusion-v1-4"
class __a (lowerCamelCase ):
def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(pipe_1_model_id )
UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(pipe_2_model_id )
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(pipe_3_model_id )
UpperCAmelCase_ : Tuple = StableDiffusionPipeline(
vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )}
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.enable_attention_slicing(None )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(__magic_name__ )
# Check that the height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase_ : int = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
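# Minimal usage sketch. The class and method names above are obfuscated, so
# `StableDiffusionComparisonPipeline` is an assumed name; the four sub-checkpoints are
# the CompVis v1.1-v1.4 ids defined at module level, and the final method runs one
# prompt through all four pipelines and returns their first images in a single output:
#   pipe = StableDiffusionComparisonPipeline(vae, text_encoder, tokenizer, unet,
#                                            scheduler, safety_checker, feature_extractor)
#   result = pipe("an astronaut riding a horse", num_inference_steps=50)
#   img_v1_1, img_v1_2, img_v1_3, img_v1_4 = result.images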
| 644
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
snake_case_ : List[str] = TypeVar("T")
class __a (Generic[T] ):
def __init__( self : int , __magic_name__ : str , __magic_name__ : str ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Any | T = None
UpperCAmelCase_ : int = len(arr )
UpperCAmelCase_ : list[T] = [any_type for _ in range(self.N )] + arr
UpperCAmelCase_ : Optional[Any] = fnc
self.build()
def UpperCAmelCase__ ( self : Tuple ) -> None:
"""simple docstring"""
for p in range(self.N - 1 , 0 , -1 ):
UpperCAmelCase_ : Optional[int] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : List[str] ) -> None:
"""simple docstring"""
p += self.N
UpperCAmelCase_ : int = v
while p > 1:
UpperCAmelCase_ : str = p // 2
UpperCAmelCase_ : str = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : List[Any] ) -> T | None: # noqa: E741
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = l + self.N, r + self.N
UpperCAmelCase_ : T | None = None
while l <= r:
if l % 2 == 1:
UpperCAmelCase_ : Any = self.st[l] if res is None else self.fn(res , self.st[l] )
if r % 2 == 0:
UpperCAmelCase_ : List[str] = self.st[r] if res is None else self.fn(res , self.st[r] )
UpperCAmelCase_ , UpperCAmelCase_ : Any = (l + 1) // 2, (r - 1) // 2
return res
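# Worked example (both endpoints are inclusive, matching the test harness below):
#   SegmentTree([1, 2, 3], min).query(0, 2) == 1
#   SegmentTree([3, 1, 2], lambda a, b: a + b).query(1, 2) == 3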
if __name__ == "__main__":
from functools import reduce
snake_case_ : Any = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
snake_case_ : Union[str, Any] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
snake_case_ : Optional[int] = SegmentTree(test_array, min)
snake_case_ : Dict = SegmentTree(test_array, max)
snake_case_ : Tuple = SegmentTree(test_array, lambda a, b: a + b)
def lowerCamelCase_ ( ) -> Optional[Any]:
for i in range(len(test_array ) ):
for j in range(i, len(test_array ) ):
UpperCAmelCase_ : List[str] = reduce(min, test_array[i : j + 1] )
UpperCAmelCase_ : Optional[int] = reduce(max, test_array[i : j + 1] )
UpperCAmelCase_ : Dict = reduce(lambda a, b: a + b, test_array[i : j + 1] )
assert min_range == min_segment_tree.query(i, j )
assert max_range == max_segment_tree.query(i, j )
assert sum_range == sum_segment_tree.query(i, j )
test_all_segments()
for index, value in test_updates.items():
snake_case_ : Tuple = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 703
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
snake_case_ : Optional[int] = 16
snake_case_ : Tuple = 32
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict:
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : Tuple = datasets.map(
tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=False )
# We also rename the 'label' column to 'labels', which is the name the
# transformers models expect
UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE__ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''' )
return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCAmelCase_ : str = DataLoader(
tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
return train_dataloader, eval_dataloader
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any:
model.eval()
UpperCAmelCase_ : List[str] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE__ ) - 1:
UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : List[str] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
# Initialize accelerator
UpperCAmelCase_ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : int = config['''lr''']
UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] )
UpperCAmelCase_ : Optional[int] = int(config['''seed'''] )
UpperCAmelCase_ : List[str] = int(config['''batch_size'''] )
UpperCAmelCase_ : Optional[int] = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ )
# Instantiate optimizer
UpperCAmelCase_ : str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, )
else:
UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ : Union[str, Any] = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' )
UpperCAmelCase_ : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase_ : List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCAmelCase_ : int = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1
UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''', optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f:
UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase_ : int = {}
for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = outputs.loss
UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase_ : Tuple = F"""epoch_{epoch}"""
UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = accuracy
UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0]
UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr''']
UpperCAmelCase_ : Tuple = epoch
UpperCAmelCase_ : Dict = overall_step
accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( ) -> List[str]:
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, )
parser.add_argument(
'''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
parser.add_argument(
'''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', )
parser.add_argument(
'''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', )
parser.add_argument(
'''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', )
UpperCAmelCase_ : Optional[int] = parser.parse_args()
UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
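# Typical launch (a sketch; the script name is an assumption, and `accelerate config`
# is expected to have been run first, optionally with a DeepSpeed config so the
# DummyOptim/DummyScheduler paths above are exercised):
#   accelerate launch test_checkpointing.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./ckpts
#   # resume from, and verify, a saved epoch directory:
#   accelerate launch test_checkpointing.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0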
| 644
| 0
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __a (unittest.TestCase ):
__a : int = MODEL_FOR_MASKED_LM_MAPPING
__a : Dict = TF_MODEL_FOR_MASKED_LM_MAPPING
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : int = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
UpperCAmelCase_ : Dict = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 3_80_15, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 2_55_06, '''token_str''': ''' accuser'''},
] , )
UpperCAmelCase_ : Any = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 3_80_15,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 2_55_06,
'''token_str''': ''' accuser''',
},
] , )
UpperCAmelCase_ : List[Any] = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 29_41, '''token_str''': ''' Te'''},
] , )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
UpperCAmelCase_ : str = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 3_56_76, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS'''},
] , )
UpperCAmelCase_ : str = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS'''},
] , )
UpperCAmelCase_ : Any = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 29_41, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
] , )
UpperCAmelCase_ : str = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
UpperCAmelCase_ : Any = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
@require_torch
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(lowercase__ )
@slow
@require_tf
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(lowercase__ )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : int ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Dict = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{'''sequence''': '''My name is John''', '''score''': 0.0_0_8, '''token''': 6_10, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.0_0_7, '''token''': 15_73, '''token_str''': ''' Chris'''},
] , )
UpperCAmelCase_ : Optional[int] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.2_5_1,
'''token''': 22_01,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.2_1_4,
'''token''': 1_27_90,
'''token_str''': ''' Lyon''',
},
] , )
UpperCAmelCase_ : Dict = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase__ ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.0_0_5, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.0_0_0, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.0_0_0, '''token''': 29_41, '''token_str''': ''' Te'''},
] , )
@require_torch
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Tuple = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = None
self.run_pipeline_test(lowercase__ , [] )
@require_tf
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
UpperCAmelCase_ : str = None
UpperCAmelCase_ : str = None
self.run_pipeline_test(lowercase__ , [] )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Dict ) -> Dict:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
UpperCAmelCase_ : Dict = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
UpperCAmelCase_ : Tuple = [
F"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = fill_masker.tokenizer
UpperCAmelCase_ : Optional[int] = fill_masker.model
UpperCAmelCase_ : Any = fill_masker(
F"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
lowercase__ , [
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
] , )
UpperCAmelCase_ : Union[str, Any] = fill_masker([F"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
lowercase__ , [
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
] , )
UpperCAmelCase_ : List[Any] = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
lowercase__ , [
[
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
],
[
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
],
] , )
with self.assertRaises(lowercase__ ):
fill_masker([None] )
# Input without a mask_token is not supported
with self.assertRaises(lowercase__ ):
fill_masker('''This is''' )
self.run_test_top_k(lowercase__ , lowercase__ )
self.run_test_targets(lowercase__ , lowercase__ )
self.run_test_top_k_targets(lowercase__ , lowercase__ )
self.fill_mask_with_duplicate_targets_and_top_k(lowercase__ , lowercase__ )
self.fill_mask_with_multiple_masks(lowercase__ , lowercase__ )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Tuple = tokenizer.get_vocab()
UpperCAmelCase_ : Tuple = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCAmelCase_ : Union[str, Any] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ , targets=lowercase__ )
UpperCAmelCase_ : Any = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
lowercase__ , [
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
] , )
UpperCAmelCase_ : str = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , lowercase__ )
UpperCAmelCase_ : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(lowercase__ ) )
# Call argument
UpperCAmelCase_ : Optional[Any] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
UpperCAmelCase_ : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=lowercase__ )
self.assertEqual(
lowercase__ , [
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
] , )
UpperCAmelCase_ : Tuple = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , lowercase__ )
UpperCAmelCase_ : Dict = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(lowercase__ ) )
# Score equivalence
UpperCAmelCase_ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=lowercase__ )
UpperCAmelCase_ : List[Any] = [top_mask["""token_str"""] for top_mask in outputs]
UpperCAmelCase_ : Dict = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase__ ) == set(lowercase__ ):
UpperCAmelCase_ : Optional[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=lowercase__ )
UpperCAmelCase_ : Tuple = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase__ ) , nested_simplify(lowercase__ ) )
# Raises with invalid
with self.assertRaises(lowercase__ ):
UpperCAmelCase_ : Optional[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase__ ):
UpperCAmelCase_ : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[''''''] )
with self.assertRaises(lowercase__ ):
UpperCAmelCase_ : str = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets='''''' )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ , top_k=2 )
UpperCAmelCase_ : Optional[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
lowercase__ , [
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
] , )
UpperCAmelCase_ : Optional[Any] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
UpperCAmelCase_ : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
lowercase__ , [
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
] , )
self.assertEqual(nested_simplify(lowercase__ ) , nested_simplify(lowercase__ ) )
def UpperCAmelCase__ ( self : int , __magic_name__ : List[str] , __magic_name__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = tokenizer.get_vocab()
UpperCAmelCase_ : int = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
# top_k=2, ntargets=3
UpperCAmelCase_ : Optional[int] = sorted(vocab.keys() )[:3]
UpperCAmelCase_ : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=lowercase__ )
# If we use the most probable targets, and filter differently, we should still
# have the same results
UpperCAmelCase_ : Any = [el["""token_str"""] for el in sorted(lowercase__ , key=lambda __magic_name__ : x["score"] , reverse=lowercase__ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase__ ).issubset(lowercase__ ):
UpperCAmelCase_ : Any = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=lowercase__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase__ ) , nested_simplify(lowercase__ ) )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Dict ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
UpperCAmelCase_ : Optional[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCAmelCase_ : str = sorted(vocab.keys() )[:3]
UpperCAmelCase_ : int = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCAmelCase_ : Any = fill_masker(F"""My name is {tokenizer.mask_token}""" , targets=lowercase__ , top_k=10 )
# The target list contains duplicates, so we cannot return more
# predictions than there are unique targets
self.assertEqual(len(lowercase__ ) , 3 )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = FillMaskPipeline(model=lowercase__ , tokenizer=lowercase__ )
UpperCAmelCase_ : List[str] = fill_masker(
F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
lowercase__ , [
[
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
],
[
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
],
[
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
{'''sequence''': ANY(lowercase__ ), '''score''': ANY(lowercase__ ), '''token''': ANY(lowercase__ ), '''token_str''': ANY(lowercase__ )},
],
] , )
| 704
|
'''simple docstring'''
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
def permute_backtrack(nums: list[int]) -> list[list[int]]:
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output: list[list[int]] = []
    backtrack(0)
    return output
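# Both functions enumerate all n! orderings, e.g. permute([1, 2, 3]) and
# permute_backtrack([1, 2, 3]) each return 6 permutations (in different orders).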
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
snake_case_ : Tuple = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 644
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
snake_case_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
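# Usage sketch (assumes transformers is installed with the optional sentencepiece
# extra; the checkpoint id is illustrative):
#   from transformers import GPTSw3Tokenizer
#   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")
#   ids = tok("Hej!")["input_ids"]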
| 705
|
'''simple docstring'''
class __a :
def __init__( self : List[Any] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = size
UpperCAmelCase_ : Tuple = [0] * size
UpperCAmelCase_ : Optional[Any] = [0] * size
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = value
while index < self.size:
UpperCAmelCase_ : str = self.get_prev(index ) + 1
if current_left_border == index:
UpperCAmelCase_ : List[str] = value
else:
UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = self.get_next(index )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
right -= 1 # because `right` is exclusive
UpperCAmelCase_ : List[str] = 0
while left <= right:
UpperCAmelCase_ : Optional[Any] = self.get_prev(right )
if left <= current_left:
UpperCAmelCase_ : Dict = max(result , self.tree[right] )
UpperCAmelCase_ : Optional[Any] = current_left
else:
UpperCAmelCase_ : str = max(result , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 0
|
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __a (lowerCamelCase ):
__a : str = (UnCLIPScheduler,)
def UpperCAmelCase__ ( self : Optional[int] , **__magic_name__ : int ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Tuple = {
'''num_train_timesteps''': 10_00,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__magic_name__ )
return config
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__magic_name__ )
def UpperCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__magic_name__ , prev_timestep=__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : Tuple = self.get_scheduler_config(variance_type='''fixed_small_log''' )
UpperCAmelCase_ : Dict = scheduler_class(**__magic_name__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_5_4_9_6_2_5 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_9_9_4_9_8_7 ) ) < 1E-5
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = self.scheduler_classes[0]
UpperCAmelCase_ : Optional[int] = self.get_scheduler_config(variance_type='''learned_range''' )
UpperCAmelCase_ : List[str] = scheduler_class(**__magic_name__ )
UpperCAmelCase_ : Any = 0.5
assert scheduler._get_variance(1 , predicted_variance=__magic_name__ ) - -1_0.1_7_1_2_7_9_0 < 1E-5
assert scheduler._get_variance(4_87 , predicted_variance=__magic_name__ ) - -5.7_9_9_8_0_5_2 < 1E-5
assert scheduler._get_variance(9_99 , predicted_variance=__magic_name__ ) - -0.0_0_1_0_0_1_1 < 1E-5
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = self.scheduler_classes[0]
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : int = scheduler_class(**__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = scheduler.timesteps
UpperCAmelCase_ : Optional[int] = self.dummy_model()
UpperCAmelCase_ : List[str] = self.dummy_sample_deter
UpperCAmelCase_ : List[str] = torch.manual_seed(0 )
for i, t in enumerate(__magic_name__ ):
# 1. predict noise residual
UpperCAmelCase_ : Any = model(__magic_name__ , __magic_name__ )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase_ : Optional[Any] = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , generator=__magic_name__ ).prev_sample
UpperCAmelCase_ : Optional[Any] = pred_prev_sample
UpperCAmelCase_ : List[str] = torch.sum(torch.abs(__magic_name__ ) )
UpperCAmelCase_ : Union[str, Any] = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1E-3
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(25 )
UpperCAmelCase_ : Tuple = scheduler.timesteps
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : Optional[Any] = self.dummy_sample_deter
UpperCAmelCase_ : str = torch.manual_seed(0 )
for i, t in enumerate(__magic_name__ ):
# 1. predict noise residual
UpperCAmelCase_ : Optional[int] = model(__magic_name__ , __magic_name__ )
if i + 1 == timesteps.shape[0]:
UpperCAmelCase_ : Union[str, Any] = None
else:
UpperCAmelCase_ : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
UpperCAmelCase_ : str = scheduler.step(
__magic_name__ , __magic_name__ , __magic_name__ , prev_timestep=__magic_name__ , generator=__magic_name__ ).prev_sample
UpperCAmelCase_ : int = pred_prev_sample
UpperCAmelCase_ : Dict = torch.sum(torch.abs(__magic_name__ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1E-3
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
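# The two empty overrides above presumably skip base SchedulerCommonTest cases that
# do not apply to UnCLIP; their original names are obfuscated here, so exactly which
# cases they disable is an assumption.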
| 706
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : List[str] = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : Tuple = scope
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : str = None
if self.use_token_type_ids:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
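    # Why the scaled std above: GPT-style models initialize residual output
    # projections ("c_proj") with std = initializer_range / sqrt(2 * num_layers)
    # so activations do not grow with depth as residual branches accumulate.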
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_attention_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        # decoder-only models need left padding for batched generation
        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_biogpt(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 644
| 0
|
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """
    Decorator that calls the bound `_hf_hook.pre_forward` (if present) before the
    wrapped method runs; the name follows diffusers' `apply_forward_hook` helper.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
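# Illustrative usage (hypothetical model class; assumes accelerate >= 0.17.0 is
# installed so the hook path is active):
#
# class MyModel(torch.nn.Module):
#     @apply_forward_hook
#     def encode(self, x):
#         return x * 2
#
# Any `_hf_hook` attached by accelerate's offloading utilities then gets a chance
# to move weights onto the right device before `encode` executes.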
| 707
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 644
| 0
|
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in `number` using Brian Kernighan's algorithm."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the loop won't run 32
        # times, it will only run as many times as there are `1` bits.
        number &= number - 1
        count += 1
    return count
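# Worked example: 25 is 0b11001; successive `number &= number - 1` steps yield
# 0b11000, 0b10000 and 0b00000, so get_set_bits_count(25) returns 3.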
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
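# Why the boundary asserts hold in float32: SiLU(x) = x * sigmoid(x), and
# exp(100) overflows float32 to inf, so sigmoid(-100) is exactly 0 and
# SiLU(-100) == 0; likewise sigmoid(20) rounds to exactly 1, so SiLU(20) == 20.
# Mish and GELU saturate the same way at these magnitudes.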
| 644
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 709
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image
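    # Worked example of the rounding: with size_divisor=32, an input of height 513
    # and width 1024 is resized to (512, 1024), since 513 // 32 * 32 == 512 while
    # 1024 is already a multiple of 32.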
    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 644
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 710
|
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Returns how many n-digit positive integers exist that are also an nth power.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
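# Worked example: 16807 = 7**5 has exactly 5 digits, so it counts; a base of 10
# or more never qualifies because 10**n already has n + 1 digits.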
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 644
| 0
|
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # ds_b: digit sum of the high-order digits b; c: value of the low-order digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    # adds `addend` to the digit array in-place, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
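# The sequence being accelerated: a(1) = 1 and a(n) = a(n - 1) + digitsum(a(n - 1)),
# i.e. 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ...; `solution(n)` recovers a(n) by
# jumping over cached runs of terms instead of stepping through them one by one.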
if __name__ == "__main__":
print(f'''{solution() = }''')
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
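# Minimal usage sketch (assumes the vision extras of transformers and a local
# image file; the ViLT checkpoint is downloaded on first use):
#
# from PIL import Image
# tool = ImageQuestionAnsweringTool()
# answer = tool(image=Image.open("photo.png"), question="What is in the photo?")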
| 644
| 0
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : Tuple = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # the inputs are already packed as {"image": ..., "question": ...} dicts
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 712
|
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        """
        Return a string of all the Nodes using in order traversal
        """
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        """
        Insert a new node in Binary Search Tree with value label
        """
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)
    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        """
        We go deep on the right branch
        """
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        """
        We go deep on the left branch
        """
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        """
        Traverse the tree; you can pass a traversal function as needed by
        client code, otherwise preorder traversal is used.
        """
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        """Perform an inorder traversal and append values of the nodes to
        a list arr"""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        """Return the kth smallest element in a binary search tree"""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
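    # Example: after inserting 8, 3, 6 and 1, the inorder list is [1, 3, 6, 8],
    # so find_kth_smallest(2, tree.root) returns 3.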
def postorder(curr_node: Node | None) -> list[Node]:
    """
    postOrder (left, right, self)
    """
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 644
| 0
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        # self.st holds the tree: internal nodes in st[1..N-1], leaves in st[N..2N-1]
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
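    # Example: with arr = [1, 10, -2, 9] and fnc = min, N = 4 and the leaves sit
    # in st[4..7]; query(0, 2) folds the stored values covering leaves 0..2 and
    # returns -2.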
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """
        Test all possible segments
        """
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 713
|
'''simple docstring'''
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Draw the triangle's outline, then recurse on the three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
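# Each recursion level replaces a triangle with three half-scale copies of
# itself, so drawing at depth d traces 3**d smallest triangles.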
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 644
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class __a (__lowercase , __lowercase , unittest.TestCase ):
__a : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__a : List[Any] = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
__a : List[str] = False
__a : Optional[Any] = False
__a : List[str] = False
__a : Union[str, Any] = False
__a : List[str] = False
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = TFResNetModelTester(self )
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=__A , has_text_modality=__A )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(__A )
UpperCAmelCase_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict, so the order of arg_names is deterministic
UpperCAmelCase_ : List[str] = [*signature.parameters.keys()]
UpperCAmelCase_ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] ):
UpperCAmelCase_ : List[Any] = model_class(__A )
UpperCAmelCase_ : str = model(**self._prepare_for_class(__A , __A ) )
UpperCAmelCase_ : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__A ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase_ : Union[str, Any] = layer_type
UpperCAmelCase_ : Optional[Any] = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[Any] = True
check_hidden_states_output(__A , __A , __A )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Union[str, Any] = TFResNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( ) -> Dict:
UpperCAmelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __a (unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase_ : Any = self.default_image_processor
UpperCAmelCase_ : Dict = prepare_img()
UpperCAmelCase_ : Tuple = image_processor(images=__A , return_tensors='''tf''' )
# forward pass
UpperCAmelCase_ : Optional[int] = model(**__A )
# verify the logits
UpperCAmelCase_ : int = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __A )
UpperCAmelCase_ : int = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __A , atol=1E-4 ) )
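# --- Added illustration (not part of the original test file): a minimal,
# self-contained sketch of the B, C, H // 32, W // 32 shape convention that the
# assertions above rely on. Assumes TensorFlow and the public
# "microsoft/resnet-50" checkpoint (with TF weights) are available.
def _tf_resnet_shape_demo():
    import tensorflow as tf
    from transformers import TFResNetModel

    model = TFResNetModel.from_pretrained("microsoft/resnet-50")
    pixel_values = tf.random.uniform((1, 3, 224, 224))  # (batch, channels, H, W)
    outputs = model(pixel_values)
    # ResNet downsamples by 32x (224 -> 7); the last stage of resnet-50 has 2048 channels
    assert tuple(outputs.last_hidden_state.shape) == (1, 2048, 7, 7)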
| 714
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
snake_case_ : List[str] = False
class __a (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__magic_name__ )
UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Any = generator.manual_seed(0 )
UpperCAmelCase_ : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077'''
UpperCAmelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe.dual_guided(
prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger '''
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = pipe.text_to_image(
prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
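# --- Added illustration: why the tests above call torch.manual_seed(0) before
# every pipeline invocation. A generator's state advances as it is consumed,
# so reproducing an output requires resetting the seed each time.
import torch

generator = torch.manual_seed(0)
first = torch.randn(2, generator=generator)
generator = torch.manual_seed(0)  # reset the state before the second draw
second = torch.randn(2, generator=generator)
assert torch.equal(first, second)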
| 644
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
snake_case_ : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[str] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], snake_case_, module_spec=__spec__)
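# --- Added illustration: the _LazyModule registration above defers the heavy
# sentencepiece import until an attribute is first accessed. A minimal
# hand-rolled analogue of that pattern (hypothetical, for exposition only):
import importlib


class _LazyProxy:
    """Imports the wrapped module only on first attribute access."""

    def __init__(self, name: str) -> None:
        self._name = name
        self._module = None

    def __getattr__(self, attr: str):
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)


# _LazyProxy("json").dumps({"a": 1})  # "json" is imported only at this call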
| 715
|
'''simple docstring'''
snake_case_ : int = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
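# --- Added illustration: a plausible consumer of the pin table above, turning
# bare package names into full version specifiers (e.g. for install_requires).
# `snake_case_` is the dependency dict defined directly above.
def _resolve_deps(names, table=None):
    table = snake_case_ if table is None else table
    unknown = [name for name in names if name not in table]
    if unknown:
        raise ValueError(F"""unknown dependencies: {unknown}""")
    return [table[name] for name in names]


# _resolve_deps(["torch", "transformers"]) -> ["torch>=1.4", "transformers>=4.25.1"]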
| 644
| 0
|
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , __UpperCAmelCase , )
class __a (__UpperCAmelCase ):
__a : Dict = RobertaConfig
__a : Optional[Any] = '''roberta'''
def __init__( self : Optional[int] , __magic_name__ : str ) -> List[Any]:
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = RobertaEmbeddings(__SCREAMING_SNAKE_CASE )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , __UpperCAmelCase , )
class __a (__UpperCAmelCase ):
__a : Dict = RobertaConfig
__a : Tuple = '''roberta'''
def __init__( self : Dict , __magic_name__ : Any ) -> int:
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = config.num_labels
UpperCAmelCase_ : Any = config.num_hidden_layers
UpperCAmelCase_ : Dict = DeeRobertaModel(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = nn.Dropout(config.hidden_dropout_prob )
UpperCAmelCase_ : str = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int , __magic_name__ : Any=None , __magic_name__ : Optional[int]=None , __magic_name__ : Dict=None , __magic_name__ : Dict=None , __magic_name__ : List[str]=None , __magic_name__ : Dict=None , __magic_name__ : List[Any]=None , __magic_name__ : List[Any]=-1 , __magic_name__ : List[Any]=False , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.num_layers
try:
UpperCAmelCase_ : Dict = self.roberta(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : Optional[Any] = outputs[1]
UpperCAmelCase_ : Dict = self.dropout(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = self.classifier(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCAmelCase_ : str = e.message
UpperCAmelCase_ : Tuple = e.exit_layer
UpperCAmelCase_ : Optional[Any] = outputs[0]
if not self.training:
UpperCAmelCase_ : Dict = entropy(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ : int = MSELoss()
UpperCAmelCase_ : Any = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase_ : int = CrossEntropyLoss()
UpperCAmelCase_ : List[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
UpperCAmelCase_ : Dict = []
for highway_exit in outputs[-1]:
UpperCAmelCase_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(__SCREAMING_SNAKE_CASE )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ : List[Any] = MSELoss()
UpperCAmelCase_ : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase_ : Union[str, Any] = CrossEntropyLoss()
UpperCAmelCase_ : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__SCREAMING_SNAKE_CASE )
if train_highway:
UpperCAmelCase_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCAmelCase_ : str = (loss,) + outputs
if not self.training:
UpperCAmelCase_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCAmelCase_ : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
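# --- Added illustration: the entropy criterion behind DeeBERT-style early
# exiting, in isolation. `_entropy` is a local stand-in for the `entropy`
# helper imported above; the 0.5 threshold is an arbitrary example value.
import torch

def _entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1E-12)).sum(dim=-1)

def _should_exit_early(logits: torch.Tensor, threshold: float = 0.5) -> bool:
    # a confident (low-entropy) highway prediction lets the model skip the remaining layers
    return _entropy(logits).max().item() < threshold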
| 716
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a (unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet
UpperCAmelCase_ : Dict = KarrasVeScheduler()
UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0]
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = KarrasVeScheduler()
UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
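# --- Added illustration: the slice assertions above compare only a 3x3 corner
# patch of the final channel against hard-coded golden values, which keeps
# reference data tiny while still catching numerical drift.
import numpy as np

image = np.zeros((1, 32, 32, 3))
image_slice = image[0, -3:, -3:, -1]  # shape (3, 3): bottom-right corner, last channel
expected_slice = np.zeros(9)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2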
| 644
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
@dataclass
class __a :
__a : Tuple = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__a : Any = field(
default=UpperCamelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__a : Dict = field(
default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
__a : Tuple = field(
default=UpperCamelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__a : Union[str, Any] = field(default=UpperCamelCase_ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__a : List[str] = field(
default=UpperCamelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __a :
__a : str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
__a : Union[str, Any] = field(
default=UpperCamelCase_ , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
__a : Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__a : Any = field(
default=UpperCamelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase_ ( ) -> int:
'''simple docstring'''
UpperCAmelCase_ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
UpperCAmelCase_ : List[str] = import_module('''tasks''' )
try:
UpperCAmelCase_ : Any = getattr(SCREAMING_SNAKE_CASE__, model_args.task_type )
UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', SCREAMING_SNAKE_CASE__ )
# Set seed
set_seed(training_args.seed )
# Prepare the CoNLL-2003 task
UpperCAmelCase_ : Optional[Any] = token_classification_task.get_labels(data_args.labels )
UpperCAmelCase_ : Dict[int, str] = dict(enumerate(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase_ : Optional[Any] = len(SCREAMING_SNAKE_CASE__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=SCREAMING_SNAKE_CASE__, idalabel=SCREAMING_SNAKE_CASE__, labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )}, cache_dir=model_args.cache_dir, )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
UpperCAmelCase_ : Optional[int] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir, )
# Get datasets
UpperCAmelCase_ : List[Any] = (
TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE__, data_dir=data_args.data_dir, tokenizer=SCREAMING_SNAKE_CASE__, labels=SCREAMING_SNAKE_CASE__, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
UpperCAmelCase_ : int = (
TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE__, data_dir=data_args.data_dir, tokenizer=SCREAMING_SNAKE_CASE__, labels=SCREAMING_SNAKE_CASE__, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def align_predictions(SCREAMING_SNAKE_CASE__ : np.ndarray, SCREAMING_SNAKE_CASE__ : np.ndarray ) -> Tuple[List[int], List[int]]:
UpperCAmelCase_ : str = np.argmax(SCREAMING_SNAKE_CASE__, axis=2 )
UpperCAmelCase_ : int = preds.shape
UpperCAmelCase_ : Dict = [[] for _ in range(SCREAMING_SNAKE_CASE__ )]
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(SCREAMING_SNAKE_CASE__ )]
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(SCREAMING_SNAKE_CASE__ : EvalPrediction ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Any = align_predictions(p.predictions, p.label_ids )
return {
"accuracy_score": accuracy_score(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ),
"precision": precision_score(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ),
"recall": recall_score(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ),
"f1": fa_score(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ),
}
# Data collator
UpperCAmelCase_ : Optional[int] = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
UpperCAmelCase_ : Optional[Any] = Trainer(
model=SCREAMING_SNAKE_CASE__, args=SCREAMING_SNAKE_CASE__, train_dataset=SCREAMING_SNAKE_CASE__, eval_dataset=SCREAMING_SNAKE_CASE__, compute_metrics=SCREAMING_SNAKE_CASE__, data_collator=SCREAMING_SNAKE_CASE__, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase_ : List[Any] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase_ : List[str] = trainer.evaluate()
UpperCAmelCase_ : Tuple = os.path.join(training_args.output_dir, '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''', SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(SCREAMING_SNAKE_CASE__ )
# Predict
if training_args.do_predict:
UpperCAmelCase_ : str = TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE__, data_dir=data_args.data_dir, tokenizer=SCREAMING_SNAKE_CASE__, labels=SCREAMING_SNAKE_CASE__, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = trainer.predict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = align_predictions(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[int] = os.path.join(training_args.output_dir, '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__, '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''', SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
UpperCAmelCase_ : List[str] = os.path.join(training_args.output_dir, '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__, '''w''' ) as writer:
with open(os.path.join(data_args.data_dir, '''test.txt''' ), '''r''' ) as f:
token_classification_task.write_predictions_to_file(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
return results
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
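# --- Added illustration: the prediction/label alignment above in miniature.
# Sub-word positions carry CrossEntropyLoss().ignore_index (-100) and must be
# dropped before the sequences are handed to the seqeval metrics.
import numpy as np

def _align_demo(predictions, label_ids, label_map, ignore_index=-100):
    preds = np.argmax(predictions, axis=2)
    preds_list = [[] for _ in range(label_ids.shape[0])]
    out_label_list = [[] for _ in range(label_ids.shape[0])]
    for i in range(label_ids.shape[0]):
        for j in range(label_ids.shape[1]):
            if label_ids[i, j] != ignore_index:
                out_label_list[i].append(label_map[label_ids[i, j]])
                preds_list[i].append(label_map[preds[i, j]])
    return preds_list, out_label_list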
| 717
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __a (lowerCamelCase ):
__a : List[Any] = "openai/whisper-base"
__a : Optional[Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__a : Any = "transcriber"
__a : str = WhisperProcessor
__a : List[Any] = WhisperForConditionalGeneration
__a : int = ["audio"]
__a : Optional[Any] = ["text"]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
return self.model.generate(inputs=__magic_name__ )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
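# --- Added usage sketch: the encode -> generate -> decode stages of the tool
# above, run by hand. Assumes the "openai/whisper-base" checkpoint is
# downloadable and `audio` is a mono 16 kHz waveform (e.g. a NumPy array).
def _transcribe_demo(audio, sampling_rate=16_000):
    from transformers import WhisperForConditionalGeneration, WhisperProcessor

    processor = WhisperProcessor.from_pretrained("openai/whisper-base")
    model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")
    features = processor(audio, sampling_rate=sampling_rate, return_tensors="pt").input_features
    tokens = model.generate(inputs=features)
    return processor.batch_decode(tokens, skip_special_tokens=True)[0]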
| 644
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case_ : Dict = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
snake_case_ : int = _LazyModule(__name__, globals()["__file__"], snake_case_, module_spec=__spec__)
| 718
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
return abs(SCREAMING_SNAKE_CASE__ ) if a == 0 else greatest_common_divisor(b % a, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
while y: # when y becomes 0, the loop terminates and x holds the final GCD.
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = y, x % y
return abs(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( ) -> Optional[int]:
try:
UpperCAmelCase_ : Optional[Any] = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
UpperCAmelCase_ : Optional[int] = int(nums[0] )
UpperCAmelCase_ : List[Any] = int(nums[1] )
print(
F"""greatest_common_divisor({num_a}, {num_a}) = """
F"""{greatest_common_divisor(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''' )
if __name__ == "__main__":
main()
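# --- Added check: a self-contained copy of the iterative Euclid loop above,
# validated against math.gcd. Trace for (48, 18): (48, 18) -> (18, 12) ->
# (12, 6) -> (6, 0), so the GCD is 6.
import math

def _euclid(x: int, y: int) -> int:
    while y:
        x, y = y, x % y
    return abs(x)

assert _euclid(48, 18) == math.gcd(48, 18) == 6
assert _euclid(0, 5) == 5 and _euclid(-24, 36) == 12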
| 644
| 0
|
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
snake_case_ : Tuple = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
snake_case_ : Optional[Any] = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
snake_case_ : str = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
return float((preds == labels).mean() )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Optional[Any]="binary" ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = simple_accuracy(_lowerCamelCase, _lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = float(fa_score(y_true=_lowerCamelCase, y_pred=_lowerCamelCase, average=_lowerCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : Dict = {}
for id_pred, label in zip(_lowerCamelCase, _lowerCamelCase ):
UpperCAmelCase_ : Dict = F"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
UpperCAmelCase_ : List[Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
UpperCAmelCase_ : Optional[int] = [(pred, label)]
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = [], []
for question, preds_labels in question_map.items():
UpperCAmelCase_ , UpperCAmelCase_ : Dict = zip(*_lowerCamelCase )
UpperCAmelCase_ : str = fa_score(y_true=_lowerCamelCase, y_pred=_lowerCamelCase, average='''macro''' )
fas.append(_lowerCamelCase )
UpperCAmelCase_ : str = int(sum(pred == label for pred, label in preds_labels ) == len(_lowerCamelCase ) )
ems.append(_lowerCamelCase )
UpperCAmelCase_ : Dict = float(sum(_lowerCamelCase ) / len(_lowerCamelCase ) )
UpperCAmelCase_ : Union[str, Any] = sum(_lowerCamelCase ) / len(_lowerCamelCase )
UpperCAmelCase_ : List[Any] = float(fa_score(y_true=_lowerCamelCase, y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a (datasets.Metric ):
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : int , __magic_name__ : Optional[int] ) -> Dict:
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg='''macro''' )
elif self.config_name == "record":
UpperCAmelCase_ : List[str] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
UpperCAmelCase_ : List[Any] = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowercase , _lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowercase , _lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
| 719
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : List[str] = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Dict = num_labels
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = range_bbox
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ : List[str] = bbox[i, j, 3]
UpperCAmelCase_ : Dict = bbox[i, j, 1]
UpperCAmelCase_ : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ : List[str] = bbox[i, j, 2]
UpperCAmelCase_ : Tuple = bbox[i, j, 0]
UpperCAmelCase_ : Union[str, Any] = t
UpperCAmelCase_ : int = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : Tuple = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a : Any = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Union[str, Any] = False
__a : int = False
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str:
"""simple docstring"""
return True
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = LiltModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : Tuple = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@slow
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ )
UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ )
UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ )
UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] )
UpperCAmelCase_ : List[str] = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , )
self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
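# --- Added illustration: the coordinate-swapping loop from the tester above,
# written as a vectorised helper. LiLT boxes are (x0, y0, x1, y1) and must
# satisfy x0 <= x1 and y0 <= y1 before being fed to the model.
import torch

def _normalize_bboxes(bbox: torch.Tensor) -> torch.Tensor:
    x_min = torch.minimum(bbox[..., 0], bbox[..., 2])
    y_min = torch.minimum(bbox[..., 1], bbox[..., 3])
    x_max = torch.maximum(bbox[..., 0], bbox[..., 2])
    y_max = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x_min, y_min, x_max, y_max], dim=-1)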
| 644
| 0
|
import os
import pytest
from attr import dataclass
snake_case_ : Union[str, Any] = 'us-east-1' # defaults region
@dataclass
class __a :
__a : str
__a : Any = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
__a : Tuple = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 16,
'''per_device_eval_batch_size''': 16,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 500,
'''save_steps''': 5_500,
}
__a : str = {**hyperparameters, '''max_steps''': 1_000}
@property
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return F"""{self.framework}-transfromers-test"""
@property
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''' )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]:
UpperCAmelCase_ : str = SageMakerTestEnvironment(framework=request.cls.framework )
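# --- Added check: the metric_definitions regexes above are what SageMaker uses
# to scrape numbers out of training logs; a quick local sanity check.
import re

log_line = "train_runtime = 123.45"
match = re.search(r"train_runtime.*=\D*(.*?)$", log_line)
assert match is not None and float(match.group(1)) == 123.45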
| 720
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : int = "▁"
snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
snake_case_ : int = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
snake_case_ : Optional[Any] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
snake_case_ : Dict = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
snake_case_ : Any = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __a (lowerCamelCase ):
__a : List[str] = ["input_ids"]
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_INIT_CONFIGURATION
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = RESOURCE_FILES_NAMES
def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None:
"""simple docstring"""
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text; there should be a match in a non-normalized sentence.
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
UpperCAmelCase_ : Optional[Any] = do_lower_case
UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt
UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__magic_name__ )
# to mimic the behaviour of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ )
else:
UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping( self , text ):
        """simple docstring"""
        if text is None:
            return None
        split_tokens = self.tokenize(text )
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize("NFKC" , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.vocab )
    def get_vocab( self ) -> Dict[str, int]:
        """simple docstring"""
        return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__( self ) -> Any:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )
    def clean_text( self , text ) -> str:
        """simple docstring"""
        return "".join(self.SP_CHAR_MAPPING.get(c , c ) for c in text )
    def _tokenize( self , text , enable_sampling=False , nbest_size=64 , alpha=0.1 ):
        """simple docstring"""
        if self.sp_model_kwargs.get("enable_sampling" ) is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha" ) is not None:
            alpha = self.sp_model_kwargs.get("alpha" )
        if self.sp_model_kwargs.get("nbest_size" ) is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size" )
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text )
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
        new_pieces = []
        for pi, piece in enumerate(pieces ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE )
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
            if len(piece ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def convert_ids_to_string( self , ids ):
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids )
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.reverse_vocab.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens( self , offset_mapping_0 , offset_mapping_1=None ):
        """simple docstring"""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0 ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0 ) + 1) + [1] * (len(token_ids_1 ) + 3)
    def is_ch_char( self , char ) -> bool:
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha( self , char ) -> bool:
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct( self , char ) -> bool:
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace( self , char ) -> bool:
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def load_vocab( self , filepath ) -> Dict[str, int]:
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath , "r" , encoding="utf-8" ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip("\n" )
                token_to_idx[token] = int(index )
        return token_to_idx
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n" )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , "sentencepiece.bpe.model" )
        with open(tokenizer_model_file , "wb" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
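# Minimal usage sketch (hypothetical file names; assumes a trained SentencePiece
# model and a matching vocab file exist locally -- they are not part of this file):
#   tokenizer = __a("sentencepiece.bpe.model", vocab_file="vocab.txt")
#   tokenizer.tokenize("Hello world")   # -> list of sentencepiece pieces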
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class __a (SequenceFeatureExtractor ):
    model_input_names = ["input_features", "attention_mask"]
    def __init__( self , feature_size=80 , sampling_rate=16000 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="hamming_window" , frame_signal_scale=32768.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
        """simple docstring"""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features( self , one_waveform ) -> np.ndarray:
        """simple docstring"""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel="log" , )
        return msfc_features.T
    def _normalize_one( self , x , input_length , padding_value ):
        """simple docstring"""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self , input_features , attention_mask=None ):
        """simple docstring"""
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
    def __call__( self , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , padding : Union[bool, str, PaddingStrategy] = False , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , sampling_rate : Optional[int] = None , **kwargs , ) -> BatchFeature:
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=True , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" )
        if isinstance(input_features[0] , list ):
            padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
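# Minimal usage sketch (assumes a 16 kHz mono waveform as a 1-D float32 array):
#   import numpy as np
#   extractor = __a(feature_size=80, sampling_rate=16000)
#   inputs = extractor(np.zeros(16000, dtype=np.float32), sampling_rate=16000)
#   inputs["input_features"][0].shape   # -> (num_frames, 80)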
'''simple docstring'''
def lowerCamelCase_ ( number : int ) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
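# Worked example: for number = -5, bin(-5)[3:] == "101" gives a 3-bit magnitude,
# abs(-5) - (1 << 3) == -3, and bin(-3)[3:] == "11"; prefixing the sign bit and
# zero-padding yields "1" + "0" + "11" == "1011", so
#   lowerCamelCase_(-5) == "0b1011"
# which is the two's complement of -5 in 4 bits.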
import random
def lowerCamelCase_ ( vertices_number : int, probability : float, directed : bool = False ) -> dict:
    graph = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is smaller than probability
    for i in range(vertices_number ):
        for j in range(i + 1, vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i )
    return graph
def complete_graph( vertices_number : int ) -> dict:
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
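# Usage sketch: lowerCamelCase_(4, 0.5) returns an adjacency dict whose exact
# edges depend on the RNG, e.g. {0: [2], 1: [3], 2: [0, 3], 3: [1, 2]}, while
# complete_graph(3) deterministically returns {0: [1, 2], 1: [0, 2], 2: [0, 1]}.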
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __a (unittest.TestCase ):
    def test_input_types( self ):
        """simple docstring"""
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def test_check_illegal_input( self ):
        """simple docstring"""
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here
    def test_example_progression( self ):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped, completed, reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def test_example_progression_unequal_three_mid_and_reset( self ):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
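# Note on the tested API: dc.update(token_id) returns a (stepped, completed, reset)
# triple -- `stepped` means the token advanced one of the candidate sequences,
# `completed` means a full candidate was matched, and `reset` means the token broke
# the in-progress match so progress started over.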
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
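# The rest of this module fills _import_structure conditionally and finally swaps
# the module object for a _LazyModule, so the framework-specific submodules are
# only imported on first attribute access.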
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
'''simple docstring'''
def hubble_parameter( hubble_constant : float, radiation_density : float, matter_density : float, dark_energy : float, redshift : float, ) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("All input parameters must be positive" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("Relative densities cannot be greater than one" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
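# The expression above is the standard FLRW relation (written here as a reference
# sketch, not taken from this file's docs):
#   E(z)^2 = Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_L
#   H(z)   = H0 * E(z),  with Omega_k = 1 - (Omega_m + Omega_r + Omega_L).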
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class __a (DiffusionPipeline ):
    def __init__( self , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , requires_safety_checker : bool = True , ) -> None:
        """simple docstring"""
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
    @property
    def components( self ) -> Dict[str, Any]:
        """simple docstring"""
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ) -> None:
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ) -> None:
        """simple docstring"""
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def text2img_sd1_1( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_2( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_3( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_4( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """simple docstring"""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
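# Usage sketch (hypothetical; constructing the pipeline downloads the four
# checkpoints named above, and each sub-call contributes its first image):
#   pipe = __a(vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor)
#   output = pipe("a photo of an astronaut riding a horse")
#   output.images   # -> one image per Stable Diffusion v1.x checkpoint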
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("glue", "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt" )
        return tokenizer.pad(examples, padding="longest", return_tensors="pt" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions, references=references, )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc" )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split("epoch_" )[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric )
        accelerator.print("resumed checkpoint performance:", accuracy )
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0] )
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"] )
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json" ), "r" ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric )
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json" ), "w" ) as f:
                json.dump(state, f )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.", )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
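# Example invocation (sketch; flags match the argparse definitions above):
#   accelerate launch this_script.py --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./checkpoints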
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def permute( nums : list[int] ) -> list[list[int]]:
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permute2( nums : list[int] ) -> list[list[int]]:
    def backtrack(start ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start, len(nums ) ):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1 )
                nums[start], nums[i] = nums[i], nums[start]  # backtrack
    output = []
    backtrack(0 )
    return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
    res = permute2([1, 2, 3])
print(res)
doctest.testmod()
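# Worked example: permute([1, 2]) returns [[2, 1], [1, 2]] and permute2([1, 2])
# returns [[1, 2], [2, 1]] -- the same two permutations, produced in a different order.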
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc( model_doc : list ) -> list:
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
    # Sort
    return sorted(new_doc, key=lambda s : s["title"].lower() )
def check_model_doc(overwrite : bool = False ) -> None:
    with open(PATH_TO_TOC, encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8" ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
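# Example invocation (sketch; the script path is hypothetical):
#   python check_doc_toc.py --fix_and_overwrite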
'''simple docstring'''
class __a :
    def __init__( self , size : int ) -> None:
        """simple docstring"""
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next( index : int ) -> int:
        """simple docstring"""
        return index | (index + 1)
    @staticmethod
    def get_prev( index : int ) -> int:
        """simple docstring"""
        return (index & (index + 1)) - 1
    def update( self , index : int , value : int ) -> None:
        """simple docstring"""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value , current_left_border , index )
            index = self.get_next(index )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
right -= 1 # because `right` is exclusive
UpperCAmelCase_ : List[str] = 0
while left <= right:
UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ )
if left <= current_left:
UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] )
UpperCAmelCase_ : Optional[Any] = current_left
else:
UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 0
|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
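# Integration tests for the `datasets` inspection helpers; they download
# metadata from the Hugging Face Hub, hence the integration marker below.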
snake_case_ : str = pytest.mark.integration
@pytest.mark.parametrize('''path''', ['''paws''', '''csv'''] )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
inspect_dataset(lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase_ : List[Any] = path + '''.py'''
assert script_name in os.listdir(lowerCAmelCase__ )
assert "__pycache__" not in os.listdir(lowerCAmelCase__ )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''', ['''accuracy'''] )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : str ) -> Dict:
inspect_metric(lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase_ : List[Any] = path + '''.py'''
assert script_name in os.listdir(lowerCAmelCase__ )
assert "__pycache__" not in os.listdir(lowerCAmelCase__ )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''', [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
], )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
UpperCAmelCase_ : str = get_dataset_config_info(lowerCAmelCase__, config_name=lowerCAmelCase__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''', [
('''paws''', None, ValueError),
], )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : int ) -> List[Any]:
with pytest.raises(lowerCAmelCase__ ):
get_dataset_config_info(lowerCAmelCase__, config_name=lowerCAmelCase__ )
@pytest.mark.parametrize(
'''path, expected''', [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
], )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = get_dataset_config_names(lowerCAmelCase__ )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''', [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
], )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = get_dataset_infos(lowerCAmelCase__ )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ : Dict = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''', [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
], )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
UpperCAmelCase_ : Tuple = get_dataset_infos(lowerCAmelCase__ )
assert expected_config in infos
UpperCAmelCase_ : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''', [
('''paws''', None, ValueError),
], )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
with pytest.raises(lowerCAmelCase__ ):
get_dataset_split_names(lowerCAmelCase__, config_name=lowerCAmelCase__ )
| 706
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
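# Test harness for BioGPT: a tester class that builds tiny configs and random
# inputs, the shared ModelTesterMixin/GenerationTesterMixin suites, and slow
# integration tests against the published microsoft/biogpt checkpoint.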
class __a :
def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : List[str] = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : Tuple = scope
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : str = None
if self.use_token_type_ids:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# create attention mask
UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
UpperCAmelCase_ : Any = self.seq_length // 2
UpperCAmelCase_ : Tuple = 0
# first forward pass
UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple()
# create hypothetical next token and extend to next_input_ids
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCAmelCase_ : str = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , )
# get two different outputs
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
# select random slice
UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval()
UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
# first forward pass
UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[
'''last_hidden_state'''
]
# select random slice
UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ )
model.to(__magic_name__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = BioGptModel(__magic_name__ )
UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = config_and_inputs
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
__a : Union[str, Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : List[str] = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = BioGptModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : str = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : Tuple = '''left'''
# Use the EOS token as the PAD token
UpperCAmelCase_ : List[Any] = tokenizer.eos_token
UpperCAmelCase_ : List[Any] = model.config.eos_token_id
# use different length sentences to test batching
UpperCAmelCase_ : Tuple = [
'''Hello, my dog is a little''',
'''Today, I''',
]
UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ )
UpperCAmelCase_ : Any = model.generate(
input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , )
UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ )
UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings )
UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : Tuple = input_dict['''input_ids''']
UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = 3
UpperCAmelCase_ : Optional[int] = '''multi_label_classification'''
UpperCAmelCase_ : int = input_dict['''input_ids''']
UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __a (unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
UpperCAmelCase_ : str = model(__magic_name__ )[0]
UpperCAmelCase_ : Optional[int] = 4_23_84
UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __magic_name__ )
UpperCAmelCase_ : List[Any] = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ )
UpperCAmelCase_ : Optional[int] = model.generate(
**__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , )
UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__magic_name__ , __magic_name__ )
| 644
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
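# Three related configurations follow: the text model, the vision model, and
# the composite config that nests both and adds the projection and
# logit-scale hyperparameters.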
class __a (__lowerCamelCase ):
__a : List[str] = '''blip_text_model'''
def __init__( self : str , __magic_name__ : List[str]=3_05_24 , __magic_name__ : int=7_68 , __magic_name__ : Any=7_68 , __magic_name__ : Optional[Any]=30_72 , __magic_name__ : Tuple=7_68 , __magic_name__ : Dict=12 , __magic_name__ : Union[str, Any]=8 , __magic_name__ : List[str]=5_12 , __magic_name__ : int="gelu" , __magic_name__ : Any=1E-12 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Optional[int]=0.0_2 , __magic_name__ : Tuple=3_05_22 , __magic_name__ : Tuple=2 , __magic_name__ : Any=0 , __magic_name__ : Any=1_02 , __magic_name__ : Optional[Any]=True , __magic_name__ : Tuple=True , **__magic_name__ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , sep_token_id=a_ , **a_ , )
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Optional[int] = encoder_hidden_size
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Optional[Any] = projection_dim
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : int = is_decoder
UpperCAmelCase_ : Union[str, Any] = use_cache
@classmethod
def UpperCAmelCase__ ( cls : Tuple , __magic_name__ : Tuple , **__magic_name__ : Optional[Any] ) -> str:
"""simple docstring"""
cls._set_token_in_kwargs(a_ )
UpperCAmelCase_ : Optional[int] = cls.get_config_dict(a_ , **a_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
UpperCAmelCase_ : str = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a_ , **a_ )
class __a (__lowerCamelCase ):
__a : Any = '''blip_vision_model'''
def __init__( self : Optional[Any] , __magic_name__ : List[Any]=7_68 , __magic_name__ : Union[str, Any]=30_72 , __magic_name__ : Union[str, Any]=5_12 , __magic_name__ : List[str]=12 , __magic_name__ : Dict=12 , __magic_name__ : Any=3_84 , __magic_name__ : Optional[Any]=16 , __magic_name__ : List[str]="gelu" , __magic_name__ : Any=1E-5 , __magic_name__ : Tuple=0.0 , __magic_name__ : Optional[Any]=1E-10 , **__magic_name__ : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(**a_ )
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : str = projection_dim
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Dict = patch_size
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : int = attention_dropout
UpperCAmelCase_ : Tuple = layer_norm_eps
UpperCAmelCase_ : Optional[Any] = hidden_act
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , __magic_name__ : Optional[int] , **__magic_name__ : Dict ) -> Dict:
"""simple docstring"""
cls._set_token_in_kwargs(a_ )
UpperCAmelCase_ : List[Any] = cls.get_config_dict(a_ , **a_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
UpperCAmelCase_ : List[Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a_ , **a_ )
class __a (__lowerCamelCase ):
__a : Optional[int] = '''blip'''
__a : Optional[int] = True
def __init__( self : int , __magic_name__ : int=None , __magic_name__ : List[str]=None , __magic_name__ : Optional[int]=5_12 , __magic_name__ : Any=2.6_5_9_2 , __magic_name__ : Dict=2_56 , **__magic_name__ : Dict , ) -> Tuple:
"""simple docstring"""
super().__init__(**a_ )
if text_config is None:
UpperCAmelCase_ : Tuple = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
if vision_config is None:
UpperCAmelCase_ : List[str] = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
UpperCAmelCase_ : Dict = BlipTextConfig(**a_ )
UpperCAmelCase_ : str = BlipVisionConfig(**a_ )
UpperCAmelCase_ : int = self.vision_config.hidden_size
UpperCAmelCase_ : List[str] = projection_dim
UpperCAmelCase_ : Any = logit_scale_init_value
UpperCAmelCase_ : int = 1.0
UpperCAmelCase_ : List[Any] = 0.0_2
UpperCAmelCase_ : int = image_text_hidden_size
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , __magic_name__ : Any , __magic_name__ : List[str] , **__magic_name__ : int ) -> int:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a_ )
def UpperCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Tuple = self.text_config.to_dict()
UpperCAmelCase_ : Tuple = self.vision_config.to_dict()
UpperCAmelCase_ : int = self.__class__.model_type
return output
| 707
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
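# Tokenizer tests built on a tiny handcrafted BPE vocab/merges pair written to
# a temp dir, plus checks that download the published facebook/blenderbot-90M
# checkpoint.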
class __a (lowerCamelCase , unittest.TestCase ):
__a : List[str] = BlenderbotSmallTokenizer
__a : List[Any] = False
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = '''adapt act apte'''
UpperCAmelCase_ : Tuple = '''adapt act apte'''
return input_text, output_text
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : List[Any] = '''adapt act apte'''
UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
UpperCAmelCase_ : Optional[int] = '''I am a small frog.'''
UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
UpperCAmelCase_ : List[Any] = '''I am a small frog .'''
UpperCAmelCase_ : Any = '''.'''
UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 644
| 0
|
'''simple docstring'''
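# Package entry point: verify the minimum Python and pyarrow versions before
# re-exporting the public API, then install a few deprecated aliases.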
snake_case_ : str = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
snake_case_ : List[Any] = concatenate_datasets
snake_case_ : Optional[int] = DownloadConfig
snake_case_ : Union[str, Any] = DownloadManager
snake_case_ : Any = DownloadMode
snake_case_ : int = DownloadConfig
snake_case_ : List[Any] = DownloadMode
snake_case_ : Optional[int] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 708
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
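# Each test fetches an activation by name via get_activation and probes a few
# points: large negative inputs should saturate to 0, zero maps to 0, and
# large positive inputs pass through (approximately) unchanged.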
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = get_activation('''swish''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_activation('''mish''' )
self.assertIsInstance(__magic_name__ , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = get_activation('''gelu''' )
self.assertIsInstance(__magic_name__ , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 644
| 0
|
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
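# Conversion script: loads a fairseq NLLB-MoE checkpoint, renames its keys to
# the Transformers layout, and shards the experts on the fly into per-file
# state dicts plus a weight-map index.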
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
UpperCAmelCase_ : Union[str, Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(_A, _A )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = emb.weight.shape
UpperCAmelCase_ : Any = nn.Linear(_A, _A, bias=_A )
UpperCAmelCase_ : str = emb.weight.data
return lin_layer
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : List[Any]=None ) -> Any:
UpperCAmelCase_ : List[str] = {}
for old_key in state_dict.keys():
UpperCAmelCase_ : Tuple = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
UpperCAmelCase_ : Union[str, Any] = key.replace('''moe_layer.experts.0''', F"""ffn.experts.expert_{expert_idx}""" )
else:
UpperCAmelCase_ : Optional[int] = key.replace('''moe_layer.experts.''', '''ffn.experts.expert_''' )
if "gate" in key:
UpperCAmelCase_ : Optional[int] = key.replace('''.moe_layer.gate.wg''', '''.ffn.router.classifier''' )
if "fc2" in key and "experts" not in key:
UpperCAmelCase_ : Dict = key.replace('''.fc2.''', '''.ffn.fc2.''' )
if "fc1" in key and "experts" not in key:
UpperCAmelCase_ : Union[str, Any] = key.replace('''.fc1.''', '''.ffn.fc1.''' )
if ".encoder_attn." in key:
UpperCAmelCase_ : Tuple = key.replace('''.encoder_attn.''', '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
UpperCAmelCase_ : List[str] = key.replace('''encoder_attn_layer_norm''', '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
UpperCAmelCase_ : Dict = key.replace('''final_layer_norm''', '''ff_layer_norm''' )
UpperCAmelCase_ : str = state_dict[old_key]
return new_dict
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : str = WEIGHTS_NAME ) -> Union[str, Any]:
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Union[str, Any] = 0
os.makedirs(_A, exist_ok=_A )
for expert in range(_A ):
UpperCAmelCase_ : str = switch_checkpoint_path + F"""-rank-{expert}.pt"""
if os.path.isfile(_A ):
UpperCAmelCase_ : List[str] = torch.load(_A )['''model''']
remove_ignore_keys_(_A )
UpperCAmelCase_ : Dict = rename_fairseq_keys(_A, _A )
UpperCAmelCase_ : Optional[int] = os.path.join(
_A, weights_name.replace('''.bin''', F"""-{len(_A )+1:05d}-of-???.bin""" ) )
torch.save(_A, _A )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_A )[0]].dtype )
# Add the last block
UpperCAmelCase_ : Dict = os.path.join(_A, weights_name.replace('''.bin''', F"""-{len(_A )+1:05d}-of-???.bin""" ) )
UpperCAmelCase_ : Union[str, Any] = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(_A )
UpperCAmelCase_ : Tuple = rename_fairseq_keys(_A, _A )
UpperCAmelCase_ : List[Any] = shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved in the same file)
if len(_A ) == 1:
UpperCAmelCase_ : Optional[int] = os.path.join(_A, _A )
torch.save(_A, _A )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_A, _A )
# Otherwise, let's build the index
UpperCAmelCase_ : Tuple = {}
for idx, shard in enumerate(_A ):
UpperCAmelCase_ : Tuple = weights_name.replace('''.bin''', F"""-{idx+1:05d}-of-{len(_A ):05d}.bin""" )
UpperCAmelCase_ : Optional[int] = os.path.join(_A, weights_name.replace('''.bin''', F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_A, os.path.join(_A, _A ) )
for key in shard:
UpperCAmelCase_ : Optional[Any] = shard_file
# Add the metadata
UpperCAmelCase_ : Optional[Any] = {'''total_size''': total_size}
UpperCAmelCase_ : Dict = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(_A, _A ), '''w''', encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Dict = json.dumps(_A, indent=2, sort_keys=_A ) + '''\n'''
f.write(_A )
return metadata, index
if __name__ == "__main__":
snake_case_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
snake_case_ : Tuple = parser.parse_args()
snake_case_ : List[Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
snake_case_ : str = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
snake_case_ : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 709
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
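# Image processor that snaps height and width down to the nearest multiple of
# `size_divisor` when resizing and optionally rescales pixel values by 1/255.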
class __a (lowerCamelCase ):
__a : Tuple = ["pixel_values"]
def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : Tuple = do_rescale
UpperCAmelCase_ : List[Any] = size_divisor
UpperCAmelCase_ : Any = resample
super().__init__(**__magic_name__ )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCAmelCase_ : Dict = height // size_divisor * size_divisor
UpperCAmelCase_ : Dict = width // size_divisor * size_divisor
UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
return image
def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor
UpperCAmelCase_ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images]
if do_resize:
UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images]
UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
UpperCAmelCase_ : int = {'''pixel_values''': images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 644
| 0
|
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
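# Static consistency check for lazy inits: parse the `_import_structure` dict
# and the TYPE_CHECKING imports in every __init__.py and verify that both
# halves declare exactly the same objects per backend.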
snake_case_ = 'src/transformers'
# Matches is_xxx_available()
snake_case_ = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
snake_case_ = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
snake_case_ = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
snake_case_ = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
snake_case_ = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
snake_case_ = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
snake_case_ = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
snake_case_ = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
snake_case_ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
snake_case_ = re.compile(r"^\s*try:")
# Catches a line with else:
snake_case_ = re.compile(r"^\s*else:")
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
if _re_test_backend.search(A_ ) is None:
return None
UpperCAmelCase_ : str = [b[0] for b in _re_backend.findall(A_ )]
backends.sort()
return "_and_".join(A_ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
with open(A_, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
UpperCAmelCase_ : Dict = f.readlines()
UpperCAmelCase_ : Any = 0
while line_index < len(A_ ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(A_ ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCAmelCase_ : Union[str, Any] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
UpperCAmelCase_ : Tuple = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(A_ ):
UpperCAmelCase_ : Dict = _re_one_line_import_struct.search(A_ ).groups()[0]
UpperCAmelCase_ : List[Any] = re.findall(r'''\[([^\]]+)\]''', A_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
UpperCAmelCase_ : List[Any] = _re_import_struct_key_value.search(A_ )
if single_line_import_search is not None:
UpperCAmelCase_ : Any = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(A_ ) > 0]
objects.extend(A_ )
elif line.startswith(''' ''' * 8 + '''\"''' ):
objects.append(line[9:-3] )
line_index += 1
UpperCAmelCase_ : str = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCAmelCase_ : int = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
UpperCAmelCase_ : Dict = lines[line_index]
if _re_import_struct_add_one.search(A_ ) is not None:
objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] )
elif _re_import_struct_add_many.search(A_ ) is not None:
UpperCAmelCase_ : Union[str, Any] = _re_import_struct_add_many.search(A_ ).groups()[0].split(''', ''' )
UpperCAmelCase_ : int = [obj[1:-1] for obj in imports if len(A_ ) > 0]
objects.extend(A_ )
elif _re_between_brackets.search(A_ ) is not None:
UpperCAmelCase_ : List[Any] = _re_between_brackets.search(A_ ).groups()[0].split(''', ''' )
UpperCAmelCase_ : Union[str, Any] = [obj[1:-1] for obj in imports if len(A_ ) > 0]
objects.extend(A_ )
elif _re_quote_object.search(A_ ) is not None:
objects.append(_re_quote_object.search(A_ ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''\"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''\"''' ):
objects.append(line[13:-3] )
line_index += 1
UpperCAmelCase_ : str = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCAmelCase_ : str = []
while (
line_index < len(A_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
UpperCAmelCase_ : Optional[Any] = lines[line_index]
UpperCAmelCase_ : List[Any] = _re_import.search(A_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCAmelCase_ : Optional[int] = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(A_ ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCAmelCase_ : Union[str, Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ : List[str] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
UpperCAmelCase_ : Union[str, Any] = lines[line_index]
UpperCAmelCase_ : List[str] = _re_import.search(A_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCAmelCase_ : Union[str, Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
def find_duplicates(SCREAMING_SNAKE_CASE__ : Dict ):
return [k for k, v in collections.Counter(A_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCAmelCase_ : Optional[Any] = []
for key in import_dict_objects.keys():
UpperCAmelCase_ : List[str] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
UpperCAmelCase_ : Dict = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCAmelCase_ : Tuple = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def lowerCamelCase_ ( ) -> List[Any]:
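# Check every __init__.py under the package: its _import_structure half and its
# TYPE_CHECKING half must define exactly the same objects.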
UpperCAmelCase_ : Union[str, Any] = []
for root, _, files in os.walk(A_ ):
if "__init__.py" in files:
UpperCAmelCase_ : Union[str, Any] = os.path.join(A_, '''__init__.py''' )
UpperCAmelCase_ : Tuple = parse_init(A_ )
if objects is not None:
UpperCAmelCase_ : Any = analyze_results(*A_ )
if len(A_ ) > 0:
UpperCAmelCase_ : List[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(A_ ) )
if len(A_ ) > 0:
raise ValueError('''\n\n'''.join(A_ ) )
def lowerCamelCase_ ( ) -> int:
UpperCAmelCase_ : Any = []
for path, directories, files in os.walk(A_ ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(A_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(A_ ) / folder).glob('''*.py''' ) ) ) == 0:
continue
UpperCAmelCase_ : Dict = str((Path(A_ ) / folder).relative_to(A_ ) )
UpperCAmelCase_ : str = short_path.replace(os.path.sep, '''.''' )
submodules.append(A_ )
for fname in files:
if fname == "__init__.py":
continue
UpperCAmelCase_ : str = str((Path(A_ ) / fname).relative_to(A_ ) )
UpperCAmelCase_ : Tuple = short_path.replace('''.py''', '''''' ).replace(os.path.sep, '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(A_ )
return submodules
snake_case_ = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def lowerCamelCase_ ( ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = importlib.util.spec_from_file_location(
'''transformers''', os.path.join(A_, '''__init__.py''' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
UpperCAmelCase_ : List[Any] = spec.loader.load_module()
UpperCAmelCase_ : str = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(A_ ) > 0:
UpperCAmelCase_ : Any = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 710
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 10, SCREAMING_SNAKE_CASE__ : int = 22 ) -> int:
UpperCAmelCase_ : Optional[int] = range(1, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = range(1, SCREAMING_SNAKE_CASE__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 644
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class __a (snake_case__ ):
__a : Tuple = '''gptsan-japanese'''
__a : Optional[int] = [
'''past_key_values''',
]
__a : Dict = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Any , __magic_name__ : str=3_60_00 , __magic_name__ : List[Any]=12_80 , __magic_name__ : Optional[Any]=10_24 , __magic_name__ : Union[str, Any]=81_92 , __magic_name__ : Any=40_96 , __magic_name__ : Union[str, Any]=1_28 , __magic_name__ : List[Any]=10 , __magic_name__ : List[str]=0 , __magic_name__ : Optional[int]=16 , __magic_name__ : str=16 , __magic_name__ : List[Any]=1_28 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=1E-5 , __magic_name__ : Optional[Any]=False , __magic_name__ : List[str]=0.0 , __magic_name__ : List[Any]="float32" , __magic_name__ : List[str]=False , __magic_name__ : List[Any]=False , __magic_name__ : Optional[Any]=False , __magic_name__ : Tuple=0.0_0_2 , __magic_name__ : List[Any]=False , __magic_name__ : Tuple=True , __magic_name__ : Optional[Any]=3_59_98 , __magic_name__ : Dict=3_59_95 , __magic_name__ : List[str]=3_59_99 , **__magic_name__ : List[Any] , ) -> int:
"""simple docstring"""
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Dict = max_position_embeddings
UpperCAmelCase_ : Dict = d_model
UpperCAmelCase_ : Optional[int] = d_ff
UpperCAmelCase_ : List[str] = d_ext
UpperCAmelCase_ : Dict = d_spout
UpperCAmelCase_ : Any = num_switch_layers
UpperCAmelCase_ : List[str] = num_ext_layers
UpperCAmelCase_ : int = num_switch_layers + num_ext_layers
UpperCAmelCase_ : Any = num_heads
UpperCAmelCase_ : Optional[Any] = num_experts
UpperCAmelCase_ : Optional[Any] = expert_capacity
UpperCAmelCase_ : str = dropout_rate
UpperCAmelCase_ : Tuple = layer_norm_epsilon
UpperCAmelCase_ : Tuple = router_bias
UpperCAmelCase_ : List[str] = router_jitter_noise
UpperCAmelCase_ : List[str] = router_dtype
UpperCAmelCase_ : Tuple = router_ignore_padding_tokens
UpperCAmelCase_ : int = output_hidden_states
UpperCAmelCase_ : Any = output_attentions
UpperCAmelCase_ : int = initializer_factor
UpperCAmelCase_ : str = output_router_logits
UpperCAmelCase_ : Optional[Any] = use_cache
super().__init__(
separator_token_id=_A , pad_token_id=_A , eos_token_id=_A , **_A , )
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __a (lowerCamelCase ):
__a : int = "dandelin/vilt-b32-finetuned-vqa"
__a : Any = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
__a : Any = "image_qa"
__a : str = AutoProcessor
__a : Any = AutoModelForVisualQuestionAnswering
__a : List[Any] = ["image", "text"]
__a : int = ["text"]
def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Any ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : "Image" , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
return self.model(**__magic_name__ ).logits
def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Dict = outputs.argmax(-1 ).item()
return self.model.config.id2label[idx]
| 644
| 0
|
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any ) -> List[Tuple[int, ...]]:
UpperCAmelCase_ : Union[str, Any] = []
if isinstance(__snake_case, __snake_case ):
for v in tree.values():
shapes.extend(_fetch_dims(__snake_case ) )
elif isinstance(__snake_case, (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(__snake_case ) )
elif isinstance(__snake_case, torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('''Not supported''' )
return shapes
@torch.jit.ignore
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple[int, ...]:
UpperCAmelCase_ : Any = []
for d in reversed(__snake_case ):
idx.append(flat_idx % d )
UpperCAmelCase_ : Any = flat_idx // d
return tuple(reversed(__snake_case ) )
@torch.jit.ignore
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Tuple = None, SCREAMING_SNAKE_CASE__ : Union[str, Any] = None, ) -> List[Tuple[slice, ...]]:
def reduce_edge_list(SCREAMING_SNAKE_CASE__ : int ) -> None:
UpperCAmelCase_ : str = True
for i in range(len(__snake_case ) ):
UpperCAmelCase_ : Union[str, Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCAmelCase_ : Tuple = l[reversed_idx]
if start_edges is None:
UpperCAmelCase_ : Union[str, Any] = [s == 0 for s in start]
reduce_edge_list(__snake_case )
if end_edges is None:
UpperCAmelCase_ : Any = [e == (d - 1) for e, d in zip(__snake_case, __snake_case )]
reduce_edge_list(__snake_case )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(__snake_case ) == 0:
return [()]
elif len(__snake_case ) == 1:
return [(slice(start[0], end[0] + 1 ),)]
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Any = []
# Dimensions common to start and end can be selected directly
for s, e in zip(__snake_case, __snake_case ):
if s == e:
path_list.append(slice(__snake_case, s + 1 ) )
else:
break
UpperCAmelCase_ : Union[str, Any] = tuple(__snake_case )
UpperCAmelCase_ : Dict = len(__snake_case )
# start == end, and we're done
if divergence_idx == len(__snake_case ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ : Union[str, Any] = start[divergence_idx]
return tuple(
path + (slice(__snake_case, sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ : List[Any] = end[divergence_idx]
return tuple(
path + (slice(__snake_case, edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx], end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCAmelCase_ : str = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Dict ) -> torch.Tensor:
UpperCAmelCase_ : str = t.shape[:no_batch_dims]
UpperCAmelCase_ : Union[str, Any] = list(_flat_idx_to_idx(__snake_case, __snake_case ) )
# _get_minimal_slice_set is inclusive
UpperCAmelCase_ : Tuple = list(_flat_idx_to_idx(flat_end - 1, __snake_case ) )
# Get an ordered list of slices to perform
UpperCAmelCase_ : Optional[int] = _get_minimal_slice_set(
__snake_case, __snake_case, __snake_case, )
UpperCAmelCase_ : Union[str, Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Optional[Any] = False, SCREAMING_SNAKE_CASE__ : Union[str, Any] = None, SCREAMING_SNAKE_CASE__ : List[Any] = False, ) -> Any:
if not (len(__snake_case ) > 0):
raise ValueError('''Must provide at least one input''' )
UpperCAmelCase_ : List[Any] = [shape[:no_batch_dims] for shape in _fetch_dims(__snake_case )]
UpperCAmelCase_ : Union[str, Any] = tuple([max(__snake_case ) for s in zip(*__snake_case )] )
def _prep_inputs(t : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCAmelCase_ : int = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCAmelCase_ : Optional[Any] = t.reshape(-1, *t.shape[no_batch_dims:] )
else:
UpperCAmelCase_ : Optional[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCAmelCase_ : str = tensor_tree_map(_prep_inputs, __snake_case )
UpperCAmelCase_ : Optional[int] = None
if _out is not None:
UpperCAmelCase_ : int = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ), _out )
UpperCAmelCase_ : Optional[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
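# Number of chunks is ceil(flat_batch_dim / chunk_size).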
UpperCAmelCase_ : Dict = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(t : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Optional[Any] = prepped_outputs
for _ in range(__snake_case ):
# Chunk the input
if not low_mem:
UpperCAmelCase_ : Optional[Any] = _select_chunk
else:
UpperCAmelCase_ : Union[str, Any] = partial(
_chunk_slice, flat_start=__snake_case, flat_end=min(__snake_case, i + chunk_size ), no_batch_dims=len(__snake_case ), )
UpperCAmelCase_ : List[str] = tensor_tree_map(__snake_case, __snake_case )
# Run the layer on the chunk
UpperCAmelCase_ : int = layer(**__snake_case )
# Allocate space for the output
if out is None:
UpperCAmelCase_ : Union[str, Any] = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ), __snake_case )
# Put the chunk in its pre-allocated space
if isinstance(__snake_case, __snake_case ):
def assign(da : Any, db : Dict ) -> None:
for k, v in da.items():
if isinstance(__snake_case, __snake_case ):
assign(__snake_case, db[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += db[k]
else:
UpperCAmelCase_ : List[Any] = db[k]
assign(__snake_case, __snake_case )
elif isinstance(__snake_case, __snake_case ):
for xa, xb in zip(__snake_case, __snake_case ):
if _add_into_out:
xa[i : i + chunk_size] += xb
else:
UpperCAmelCase_ : int = xb
elif isinstance(__snake_case, torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCAmelCase_ : Union[str, Any] = output_chunk
else:
raise ValueError('''Not supported''' )
i += chunk_size
UpperCAmelCase_ : Optional[int] = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ), __snake_case )
return out
class __a :
def __init__( self : Optional[Any] , __magic_name__ : int = 5_12 , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = max_chunk_size
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
def UpperCAmelCase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] ) -> int:
"""simple docstring"""
logging.info('''Tuning chunk size...''' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCAmelCase_ : Optional[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
UpperCAmelCase_ : Any = [c for c in candidates if c > min_chunk_size]
UpperCAmelCase_ : Dict = [min_chunk_size] + candidates
candidates[-1] += 4
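# Binary-search the candidate chunk sizes for the largest one that still runs without a RuntimeError.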
def test_chunk_size(__magic_name__ : Dict ) -> bool:
try:
with torch.no_grad():
fn(*__a , chunk_size=__a )
return True
except RuntimeError:
return False
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : str = len(__a ) - 1
while i > min_viable_chunk_size_index:
UpperCAmelCase_ : Any = test_chunk_size(candidates[i] )
if not viable:
UpperCAmelCase_ : str = (min_viable_chunk_size_index + i) // 2
else:
UpperCAmelCase_ : Any = i
UpperCAmelCase_ : int = (i + len(__a ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] ) -> bool:
"""simple docstring"""
UpperCAmelCase_ : Dict = True
for aa, ab in zip(__a , __a ):
assert type(__a ) == type(__a )
if isinstance(__a , (list, tuple) ):
consistent &= self._compare_arg_caches(__a , __a )
elif isinstance(__a , __a ):
UpperCAmelCase_ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
UpperCAmelCase_ : Tuple = [v for _, v in sorted(ab.items() , key=lambda x : x[0] )]
consistent &= self._compare_arg_caches(__a , __a )
else:
consistent &= aa == ab
return consistent
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Optional[Any] = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , __a , __a )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__a )
UpperCAmelCase_ : Optional[Any] = self._compare_arg_caches(self.cached_arg_data , __a )
else:
# Otherwise, we can reuse the precomputed value
UpperCAmelCase_ : Tuple = False
if not consistent:
UpperCAmelCase_ : Union[str, Any] = self._determine_favorable_chunk_size(
__a , __a , __a , )
UpperCAmelCase_ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 712
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class __a :
def __init__( self : Optional[Any] , __magic_name__ : int | None = None ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[str] = value
UpperCAmelCase_ : Node | None = None # Added in order to delete a node easier
UpperCAmelCase_ : Node | None = None
UpperCAmelCase_ : Node | None = None
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class __a :
def __init__( self : int , __magic_name__ : Node | None = None ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = root
def __str__( self : Any ) -> str:
"""simple docstring"""
return str(self.root )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None:
"""simple docstring"""
if new_children is not None: # reset its kids
UpperCAmelCase_ : Dict = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__magic_name__ ): # If it is the right children
UpperCAmelCase_ : Optional[Any] = new_children
else:
UpperCAmelCase_ : Optional[int] = new_children
else:
UpperCAmelCase_ : List[str] = new_children
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Node ) -> bool:
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase__ ( self : Union[str, Any] ) -> bool:
"""simple docstring"""
return self.root is None
def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(__magic_name__ ) # create a new Node
if self.empty(): # if Tree is empty
UpperCAmelCase_ : List[Any] = new_node # set its root
else: # Tree is not empty
UpperCAmelCase_ : str = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
UpperCAmelCase_ : List[Any] = parent_node.left
else:
if parent_node.right is None:
UpperCAmelCase_ : List[Any] = new_node
break
else:
UpperCAmelCase_ : Union[str, Any] = parent_node.right
UpperCAmelCase_ : Union[str, Any] = parent_node
def UpperCAmelCase__ ( self : Optional[Any] , *__magic_name__ : List[str] ) -> None:
"""simple docstring"""
for value in values:
self.__insert(__magic_name__ )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> Node | None:
"""simple docstring"""
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
UpperCAmelCase_ : str = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
UpperCAmelCase_ : List[str] = node.left if value < node.value else node.right
return node
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
if self.root is None:
return None
UpperCAmelCase_ : Dict = self.root
if not self.empty():
while node.right is not None:
UpperCAmelCase_ : Any = node.right
return node
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
UpperCAmelCase_ : Optional[int] = self.root
if self.root is None:
return None
if not self.empty():
UpperCAmelCase_ : Union[str, Any] = self.root
while node.left is not None:
UpperCAmelCase_ : Dict = node.left
return node
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.search(__magic_name__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__magic_name__ , __magic_name__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(__magic_name__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__magic_name__ , node.left )
else:
UpperCAmelCase_ : List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCAmelCase_ : Optional[int] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable:
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any]=None ) -> Any:
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : list , __magic_name__ : Node | None ) -> None:
"""simple docstring"""
if node:
self.inorder(__magic_name__ , node.left )
arr.append(node.value )
self.inorder(__magic_name__ , node.right )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Node ) -> int:
"""simple docstring"""
UpperCAmelCase_ : list[int] = []
self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Node | None ) -> list[Node]:
UpperCAmelCase_ : Any = []
if curr_node is not None:
UpperCAmelCase_ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCamelCase_ ( ) -> None:
UpperCAmelCase_ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7)
UpperCAmelCase_ : Tuple = BinarySearchTree()
for i in testlist:
t.insert(SCREAMING_SNAKE_CASE__ )
# Prints all the elements of the list in order traversal
print(SCREAMING_SNAKE_CASE__ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''', t.get_max().value ) # type: ignore
print('''Min Value: ''', t.get_min().value ) # type: ignore
for i in testlist:
t.remove(SCREAMING_SNAKE_CASE__ )
print(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 644
| 0
|
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
snake_case_ : Optional[int] = (
"This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
warnings.warn(lowerCamelCase_, lowerCamelCase_ )
requires_backends(lowerCamelCase_, '''sklearn''' )
return (preds == labels).mean()
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
warnings.warn(lowerCamelCase_, lowerCamelCase_ )
requires_backends(lowerCamelCase_, '''sklearn''' )
UpperCAmelCase_ : Any = simple_accuracy(lowerCamelCase_, lowerCamelCase_ )
UpperCAmelCase_ : Any = fa_score(y_true=lowerCamelCase_, y_pred=lowerCamelCase_ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
warnings.warn(lowerCamelCase_, lowerCamelCase_ )
requires_backends(lowerCamelCase_, '''sklearn''' )
UpperCAmelCase_ : Optional[int] = pearsonr(lowerCamelCase_, lowerCamelCase_ )[0]
UpperCAmelCase_ : List[str] = spearmanr(lowerCamelCase_, lowerCamelCase_ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
warnings.warn(lowerCamelCase_, lowerCamelCase_ )
requires_backends(lowerCamelCase_, '''sklearn''' )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ), F"""Predictions and labels have mismatched lengths {len(lowerCamelCase_ )} and {len(lowerCamelCase_ )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(lowerCamelCase_, lowerCamelCase_ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(lowerCamelCase_, lowerCamelCase_ )}
elif task_name == "mrpc":
return acc_and_fa(lowerCamelCase_, lowerCamelCase_ )
elif task_name == "sts-b":
return pearson_and_spearman(lowerCamelCase_, lowerCamelCase_ )
elif task_name == "qqp":
return acc_and_fa(lowerCamelCase_, lowerCamelCase_ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(lowerCamelCase_, lowerCamelCase_ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(lowerCamelCase_, lowerCamelCase_ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(lowerCamelCase_, lowerCamelCase_ )}
elif task_name == "rte":
return {"acc": simple_accuracy(lowerCamelCase_, lowerCamelCase_ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(lowerCamelCase_, lowerCamelCase_ )}
elif task_name == "hans":
return {"acc": simple_accuracy(lowerCamelCase_, lowerCamelCase_ )}
else:
raise KeyError(lowerCamelCase_ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
warnings.warn(lowerCamelCase_, lowerCamelCase_ )
requires_backends(lowerCamelCase_, '''sklearn''' )
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
raise ValueError(F"""Predictions and labels have mismatched lengths {len(lowerCamelCase_ )} and {len(lowerCamelCase_ )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(lowerCamelCase_, lowerCamelCase_ )}
else:
raise KeyError(lowerCamelCase_ )
| 713
|
'''simple docstring'''
import sys
import turtle
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float] ) -> tuple[float, float]:
return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : int, ) -> None:
my_pen.up()
my_pen.goto(vertexa[0], vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0], vertexa[1] )
my_pen.goto(vertexa[0], vertexa[1] )
my_pen.goto(vertexa[0], vertexa[1] )
if depth == 0:
return
triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )
triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )
triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
snake_case_ : Any = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
snake_case_ : Tuple = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 644
| 0
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
snake_case_ : List[str] = False
try:
snake_case_ : int = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class __a :
def __init__( self : str , __magic_name__ : str = None , __magic_name__ : list = [] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Union[str, Any] = choices
UpperCAmelCase_ : Union[str, Any] = prompt
if sys.platform == "win32":
UpperCAmelCase_ : Union[str, Any] = '''*'''
else:
UpperCAmelCase_ : Optional[int] = '''➔ '''
def UpperCAmelCase__ ( self : int , __magic_name__ : List[Any] , __magic_name__ : str = "" ) -> Optional[int]:
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , UpperCamelCase_ )
else:
forceWrite(self.choices[index] , UpperCamelCase_ )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
if index == self.position:
forceWrite(F""" {self.arrow_char} """ )
self.write_choice(UpperCamelCase_ )
else:
forceWrite(F""" {self.choices[index]}""" )
reset_cursor()
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Direction , __magic_name__ : int = 1 ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(UpperCamelCase_ )
move_cursor(UpperCamelCase_ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = int(chr(self.current_selection ) )
UpperCAmelCase_ : Optional[int] = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , UpperCamelCase_ )
else:
return
else:
return
def UpperCAmelCase__ ( self : int , __magic_name__ : int = 0 ) -> Any:
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
UpperCAmelCase_ : str = default_choice
for i in range(len(self.choices ) ):
self.print_choice(UpperCamelCase_ )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
UpperCAmelCase_ : Optional[int] = int(builtins.input() )
except ValueError:
UpperCAmelCase_ : List[str] = default_choice
else:
UpperCAmelCase_ : int = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(UpperCamelCase_ , '''\n''' )
return choice
| 714
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
snake_case_ : List[str] = False
class __a (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__magic_name__ )
UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Any = generator.manual_seed(0 )
UpperCAmelCase_ : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077'''
UpperCAmelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe.dual_guided(
prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger '''
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = pipe.text_to_image(
prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 644
| 0
|
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
snake_case_ : Any = logging.get_logger(__name__)
class __a (_UpperCamelCase ):
__a : int = ["pixel_values"]
def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 2_55 , __magic_name__ : bool = True , __magic_name__ : int = 8 , **__magic_name__ : Dict , ) -> Tuple:
"""simple docstring"""
super().__init__(**__magic_name__ )
UpperCAmelCase_ : str = do_rescale
UpperCAmelCase_ : Optional[Any] = rescale_factor
UpperCAmelCase_ : Any = do_pad
UpperCAmelCase_ : Optional[Any] = pad_size
def UpperCAmelCase__ ( self : str , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] ) -> Any:
"""simple docstring"""
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : Optional[Union[str, ChannelDimension]] = None ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = get_image_size(__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = (old_height // size + 1) * size - old_height
UpperCAmelCase_ : List[Any] = (old_width // size + 1) * size - old_width
return pad(__magic_name__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=__magic_name__ )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : ImageInput , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[float] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__magic_name__ : Dict , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : str = do_pad if do_pad is not None else self.do_pad
UpperCAmelCase_ : Optional[int] = pad_size if pad_size is not None else self.pad_size
UpperCAmelCase_ : Tuple = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Any = [to_numpy_array(__magic_name__ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Any = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_pad:
UpperCAmelCase_ : Tuple = [self.pad(__magic_name__ , size=__magic_name__ ) for image in images]
UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
UpperCAmelCase_ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 715
|
'''simple docstring'''
snake_case_ : int = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 644
| 0
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> int:
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict ) -> bool:
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Dict = number
while duplicate > 0:
duplicate , digit = divmod(duplicate, 10 )
fact_sum += factorial(digit )
return fact_sum == number
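# Worked example: 145 -> 1! + 4! + 5! = 1 + 24 + 120 = 145, so 145 is a Krishnamurthy number.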
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
snake_case_ : Dict = int(input("Enter number: ").strip())
print(
f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
| 716
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a (unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet
UpperCAmelCase_ : Dict = KarrasVeScheduler()
UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0]
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = KarrasVeScheduler()
UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 644
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
snake_case_ : int = TypeVar("T")
class __a (Generic[T] ):
def __init__( self : List[str] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Any = data
UpperCAmelCase_ : Dict = self
UpperCAmelCase_ : List[str] = 0
class __a (Generic[T] ):
def __init__( self : List[Any] ) -> None:
"""simple docstring"""
# map from node name to the node object
UpperCAmelCase_ : Optional[Any] = {}
def UpperCAmelCase__ ( self : str , __magic_name__ : Tuple ) -> None:
"""simple docstring"""
# create a new set with x as its member
UpperCAmelCase_ : List[Any] = DisjointSetTreeNode(lowerCAmelCase_ )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Any ) -> DisjointSetTreeNode[T]:
"""simple docstring"""
# find the set x belongs to (with path-compression)
UpperCAmelCase_ : Optional[Any] = self.map[data]
if elem_ref != elem_ref.parent:
UpperCAmelCase_ : str = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Dict , __magic_name__ : Any ) -> None:
"""simple docstring"""
# helper function for union operation
if nodea.rank > nodea.rank:
UpperCAmelCase_ : Optional[int] = nodea
else:
UpperCAmelCase_ : List[str] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] ) -> None:
"""simple docstring"""
# merge 2 disjoint sets
self.link(self.find_set(lowerCAmelCase_ ) , self.find_set(lowerCAmelCase_ ) )
class __a (Generic[T] ):
def __init__( self : int ) -> None:
"""simple docstring"""
# connections: map from the node to the neighbouring nodes (with weights)
UpperCAmelCase_ : Optional[Any] = {}
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Any ) -> None:
"""simple docstring"""
# add a node ONLY if its not present in the graph
if node not in self.connections:
UpperCAmelCase_ : Dict = {}
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : List[Any] ) -> None:
"""simple docstring"""
# add an edge with the given weight
self.add_node(lowerCAmelCase_ )
self.add_node(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = weight
UpperCAmelCase_ : Union[str, Any] = weight
def UpperCAmelCase__ ( self : Dict ) -> GraphUndirectedWeighted[T]:
"""simple docstring"""
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Optional[int] = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda x : x[2] )
# creating the disjoint set
UpperCAmelCase_ : int = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(lowerCAmelCase_ )
# MST generation
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Optional[int] = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = edges[index]
index += 1
UpperCAmelCase_ : int = disjoint_set.find_set(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = disjoint_set.find_set(lowerCAmelCase_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
disjoint_set.union(lowerCAmelCase_ , lowerCAmelCase_ )
return graph
| 717
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __a (lowerCamelCase ):
__a : List[Any] = "openai/whisper-base"
__a : Optional[Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__a : Any = "transcriber"
__a : str = WhisperProcessor
__a : List[Any] = WhisperForConditionalGeneration
__a : int = ["audio"]
__a : Optional[Any] = ["text"]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
return self.model.generate(inputs=__magic_name__ )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
| 644
| 0
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Optional[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case_ : int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : str ) -> Any:
UpperCAmelCase_ : List[str] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = val
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> str:
UpperCAmelCase_ : int = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase_ : Dict = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''' )
UpperCAmelCase_ : Dict = value
else:
UpperCAmelCase_ : Tuple = value
return new_state_dict
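# e.g. "backbone.0.body.conv1.weight" becomes "backbone.conv_encoder.model.conv1.weight";
# keys without the "backbone.0.body" prefix are copied over unchanged.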
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
UpperCAmelCase_ : int = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase_ : List[str] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCAmelCase_ : Optional[Any] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[:256, :]
UpperCAmelCase_ : int = in_proj_bias[:256]
UpperCAmelCase_ : int = in_proj_weight[256:512, :]
UpperCAmelCase_ : int = in_proj_bias[256:512]
UpperCAmelCase_ : Tuple = in_proj_weight[-256:, :]
UpperCAmelCase_ : Any = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCAmelCase_ : int = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCAmelCase_ : Tuple = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Optional[int] = in_proj_weight[:256, :]
UpperCAmelCase_ : Dict = in_proj_bias[:256]
UpperCAmelCase_ : Any = in_proj_weight[256:512, :]
UpperCAmelCase_ : List[str] = in_proj_bias[256:512]
UpperCAmelCase_ : int = in_proj_weight[-256:, :]
UpperCAmelCase_ : str = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCAmelCase_ : Optional[Any] = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
UpperCAmelCase_ : int = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCAmelCase_ : Tuple = in_proj_weight_cross_attn[:256, :]
UpperCAmelCase_ : int = in_proj_bias_cross_attn[:256]
UpperCAmelCase_ : Any = in_proj_weight_cross_attn[256:512, :]
UpperCAmelCase_ : List[Any] = in_proj_bias_cross_attn[256:512]
UpperCAmelCase_ : List[Any] = in_proj_weight_cross_attn[-256:, :]
UpperCAmelCase_ : int = in_proj_bias_cross_attn[-256:]
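# Hedged sketch (not called by the conversion above; the name and default are illustrative):
# the hard-coded 256 / 512 offsets reflect hidden_size == 256 for this checkpoint. A fused
# in_proj matrix of shape (3 * hidden_size, hidden_size) splits into query, key and value
# blocks as follows.
def _split_fused_qkv_sketch(in_proj_weight, in_proj_bias, hidden_size=256):
    query = (in_proj_weight[:hidden_size, :], in_proj_bias[:hidden_size])
    key = (in_proj_weight[hidden_size : 2 * hidden_size, :], in_proj_bias[hidden_size : 2 * hidden_size])
    value = (in_proj_weight[-hidden_size:, :], in_proj_bias[-hidden_size:])
    return query, key, value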
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = image.size
UpperCAmelCase_ : str = max(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : str = 800 if '''detection''' in checkpoint_url else 1000
UpperCAmelCase_ : int = target_max_size / current_max_size
UpperCAmelCase_ : Dict = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
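# e.g. a 1600 x 1200 detection input gives scale = 800 / 1600 = 0.5,
# so the image is resized to 800 x 600 (the longer side is capped at 800).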
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = F.to_tensor(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = F.normalize(SCREAMING_SNAKE_CASE__, mean=[0.4_85, 0.4_56, 0.4_06], std=[0.2_29, 0.2_24, 0.2_25] )
return image
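# The mean/std above are the standard ImageNet normalization statistics.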
@torch.no_grad()
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
logger.info('''Converting model...''' )
# load original state dict
UpperCAmelCase_ : Any = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__, map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Union[str, Any] = rename_backbone_keys(SCREAMING_SNAKE_CASE__ )
# query, key and value matrices need special treatment
read_in_q_k_v(SCREAMING_SNAKE_CASE__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase_ : List[str] = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = val
# create HuggingFace model and load state dict
UpperCAmelCase_ : Union[str, Any] = TableTransformerConfig(
backbone='''resnet18''', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
if "detection" in checkpoint_url:
UpperCAmelCase_ : Optional[Any] = 15
UpperCAmelCase_ : str = 2
UpperCAmelCase_ : Dict = {0: '''table''', 1: '''table rotated'''}
UpperCAmelCase_ : Dict = idalabel
UpperCAmelCase_ : str = {v: k for k, v in idalabel.items()}
else:
UpperCAmelCase_ : Union[str, Any] = 125
UpperCAmelCase_ : Union[str, Any] = 6
UpperCAmelCase_ : Union[str, Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCAmelCase_ : Optional[Any] = idalabel
UpperCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Any = DetrImageProcessor(
format='''coco_detection''', max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCAmelCase_ : Any = TableTransformerForObjectDetection(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# verify our conversion
UpperCAmelCase_ : Any = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCAmelCase_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''', repo_type='''dataset''', filename=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = Image.open(SCREAMING_SNAKE_CASE__ ).convert('''RGB''' )
UpperCAmelCase_ : Dict = normalize(resize(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) ).unsqueeze(0 )
UpperCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE__ )
if "detection" in checkpoint_url:
UpperCAmelCase_ : int = (1, 15, 3)
UpperCAmelCase_ : Tuple = torch.tensor(
[[-6.78_97, -16.99_85, 6.79_37], [-8.01_86, -22.21_92, 6.96_77], [-7.31_17, -21.07_08, 7.40_55]] )
UpperCAmelCase_ : List[str] = torch.tensor([[0.48_67, 0.17_67, 0.67_32], [0.67_18, 0.44_79, 0.38_30], [0.47_16, 0.17_60, 0.63_64]] )
else:
UpperCAmelCase_ : Dict = (1, 125, 7)
UpperCAmelCase_ : Optional[int] = torch.tensor(
[[-18.14_30, -8.32_14, 4.82_74], [-18.46_85, -7.13_61, -4.26_67], [-26.36_93, -9.34_29, -4.99_62]] )
UpperCAmelCase_ : int = torch.tensor([[0.49_83, 0.55_95, 0.94_40], [0.49_16, 0.63_15, 0.59_54], [0.61_08, 0.86_37, 0.11_35]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3], SCREAMING_SNAKE_CASE__, atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3], SCREAMING_SNAKE_CASE__, atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCAmelCase_ : Tuple = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(SCREAMING_SNAKE_CASE__ )
image_processor.push_to_hub(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
snake_case_ : str = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
snake_case_ : Dict = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
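# Example invocation (the script name and output path are illustrative; the flags match
# the argparse definition above):
#   python convert_table_transformer.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection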
| 718
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
return abs(SCREAMING_SNAKE_CASE__ ) if a == 0 else greatest_common_divisor(b % a, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
while y: # when y becomes 0 the loop terminates and x is returned as the final GCD.
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = y, x % y
return abs(SCREAMING_SNAKE_CASE__ )
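# Worked examples (illustrative):
#   greatest_common_divisor(24, 40) -> 8, since 40 % 24 = 16, 24 % 16 = 8, 16 % 8 = 0
#   gcd_by_iterative(24, 40)        -> 8, via the same Euclidean remainder chain
#   gcd_by_iterative(-3, 9)         -> 3, abs() keeps the result non-negative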
def lowerCamelCase_ ( ) -> Optional[int]:
try:
UpperCAmelCase_ : Optional[Any] = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
UpperCAmelCase_ : Optional[int] = int(nums[0] )
UpperCAmelCase_ : List[Any] = int(nums[1] )
print(
F"""greatest_common_divisor({num_a}, {num_a}) = """
F"""{greatest_common_divisor(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''' )
if __name__ == "__main__":
main()
| 644
| 0
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : Optional[Any] = """T5Config"""
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : List[str] ) -> jnp.ndarray:
UpperCAmelCase_ : Tuple = jnp.zeros_like(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : str = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
UpperCAmelCase_ : Optional[int] = shifted_input_ids.at[:, 0].set(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[int] = jnp.where(shifted_input_ids == -100, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
return shifted_input_ids
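# Worked example (hypothetical ids): with decoder_start_token_id = 0 and pad_token_id = 1,
#   [[5, -100, 7, 8]] -> positions 1: are filled with [5, -100, 7] -> [[0, 5, -100, 7]]
#   -> jnp.where then replaces the label-masking value -100 with pad -> [[0, 5, 1, 7]]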
class __a (SCREAMING_SNAKE_CASE__ ):
__a : Optional[int] = "mt5"
__a : Tuple = MTaConfig
class __a (SCREAMING_SNAKE_CASE__ ):
__a : Union[str, Any] = "mt5"
__a : Union[str, Any] = MTaConfig
class __a (SCREAMING_SNAKE_CASE__ ):
__a : Union[str, Any] = "mt5"
__a : Optional[Any] = MTaConfig
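# Hedged usage note: these thin subclasses reuse the Flax T5 implementations with an mT5
# config, e.g. loading a multilingual checkpoint such as "google/mt5-small" via
# .from_pretrained (checkpoint name illustrative).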
| 719
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : List[str] = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Dict = num_labels
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = range_bbox
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ : List[str] = bbox[i, j, 3]
UpperCAmelCase_ : Dict = bbox[i, j, 1]
UpperCAmelCase_ : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ : List[str] = bbox[i, j, 2]
UpperCAmelCase_ : Tuple = bbox[i, j, 0]
UpperCAmelCase_ : Union[str, Any] = t
UpperCAmelCase_ : int = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : Tuple = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a : Any = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Union[str, Any] = False
__a : int = False
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str:
"""simple docstring"""
return True
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = LiltModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : Tuple = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@slow
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ )
UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ )
UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ )
UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] )
UpperCAmelCase_ : List[str] = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , )
self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
| 644
| 0
|
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
return abs(__a ) if a == 0 else greatest_common_divisor(b % a, __a )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
while y: # when y becomes 0 the loop terminates and x is returned as the final GCD.
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = y, x % y
return abs(__a )
def lowerCamelCase_ ( ) -> Tuple:
try:
UpperCAmelCase_ : Any = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
UpperCAmelCase_ : Optional[Any] = int(nums[0] )
UpperCAmelCase_ : List[str] = int(nums[1] )
print(
F"""greatest_common_divisor({num_a}, {num_a}) = """
F"""{greatest_common_divisor(__a, __a )}""" )
print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__a, __a )}""" )
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''' )
if __name__ == "__main__":
main()
| 720
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : int = "▁"
snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
snake_case_ : int = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
snake_case_ : Optional[Any] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
snake_case_ : Dict = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
snake_case_ : Any = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __a (lowerCamelCase ):
__a : List[str] = ["input_ids"]
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_INIT_CONFIGURATION
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = RESOURCE_FILES_NAMES
def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None:
"""simple docstring"""
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is part of the raw text, so there should be a match in a non-normalized sentence.
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
UpperCAmelCase_ : Optional[Any] = do_lower_case
UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt
UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__magic_name__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ )
else:
UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any:
"""simple docstring"""
if text is None:
return None
UpperCAmelCase_ : str = self.tokenize(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', []
for i, ch in enumerate(__magic_name__ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ )
else:
UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ )
if self.is_whitespace(__magic_name__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__magic_name__ ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase_ : Optional[int] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase_ : Tuple = token[1:]
UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset
UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase_ : int = end
return token_mapping
@property
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
return len(self.vocab )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase_ : Optional[Any] = None
return state
def __setstate__( self : str , __magic_name__ : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]:
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCAmelCase_ : Dict = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ )
else:
UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : List[Any] = []
for pi, piece in enumerate(__magic_name__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0:
new_pieces.append(__magic_name__ )
continue
else:
continue
UpperCAmelCase_ : List[str] = 0
for i, chunk in enumerate(__magic_name__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__magic_name__ )
UpperCAmelCase_ : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : str = i
if len(__magic_name__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.reverse_vocab.get(__magic_name__ , self.unk_token )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1]
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__magic_name__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3)
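# e.g. token_ids_0 = [5, 6] and token_ids_1 = [7] correspond to
# [CLS] 5 6 [SEP] [SEP] 7 [SEP] and yield [0, 0, 0, 1, 1, 1, 1];
# note that the first [SEP] already belongs to the second segment under this scheme.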
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__magic_name__ ) == 1:
UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ )
if cat == "Zs":
return True
return False
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = {}
with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__magic_name__ ):
UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' )
UpperCAmelCase_ : Dict = int(__magic_name__ )
return token_to_idx
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 0
if os.path.isdir(__magic_name__ ):
UpperCAmelCase_ : Any = os.path.join(
__magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda __magic_name__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCAmelCase_ : Dict = token_index
writer.write(token + '''\n''' )
index += 1
UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' )
with open(__magic_name__ , '''wb''' ) as fi:
UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (vocab_file,)
| 644
| 0
|
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : bool = True, *SCREAMING_SNAKE_CASE__ : Optional[int], **SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
if not is_tqdm_available():
raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
UpperCAmelCase_ : List[str] = False
if main_process_only:
UpperCAmelCase_ : Any = PartialState().local_process_index != 0  # show the bar only on the local main process
return _tqdm(*SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__, disable=SCREAMING_SNAKE_CASE__ )
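# Minimal usage sketch (`dataloader` is a placeholder; in the real accelerate API this
# wrapper is exposed as `tqdm`): per the signature above the flag comes first, so only
# the local main process draws the bar.
#
#   for batch in tqdm(True, dataloader):
#       ...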
| 721
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> str:
if number > 0:
raise ValueError('''input must be a negative integer''' )
UpperCAmelCase_ : Union[str, Any] = len(bin(SCREAMING_SNAKE_CASE__ )[3:] )
UpperCAmelCase_ : Union[str, Any] = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase_ : Optional[Any] = (
(
'''1'''
+ '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE__ ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 0
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
snake_case_ : int = HUGGINGFACE_HUB_CACHE
snake_case_ : str = "config.json"
snake_case_ : Union[str, Any] = "diffusion_pytorch_model.bin"
snake_case_ : Union[str, Any] = "diffusion_flax_model.msgpack"
snake_case_ : Tuple = "model.onnx"
snake_case_ : Tuple = "diffusion_pytorch_model.safetensors"
snake_case_ : Dict = "weights.pb"
snake_case_ : Union[str, Any] = "https://huggingface.co"
snake_case_ : str = default_cache_path
snake_case_ : List[Any] = "diffusers_modules"
snake_case_ : Optional[int] = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
snake_case_ : Tuple = ["fp16", "non-ema"]
snake_case_ : List[Any] = ".self_attn"
| 700
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
# For consistency across the different places where DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only from integers.
UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(__magic_name__ ) # fails here
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 )
UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 )
UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
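# Hedged usage sketch (model and input_ids are placeholders): a DisjunctiveConstraint
# lets constrained beam search satisfy any one of several token sequences.
#
#   constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   outputs = model.generate(input_ids, constraints=[constraint], num_beams=4)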
| 644
| 0
|
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
snake_case_ : Optional[int] = "bart"
snake_case_ : Tuple = True
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowerCamelCase_ ( ) -> str:
if LOAD_DENSE_INDEX:
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
UpperCAmelCase_ : Dict = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
UpperCAmelCase_ : Any = qar_model.eval()
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = (None, None)
if MODEL_TYPE == "bart":
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
UpperCAmelCase_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
UpperCAmelCase_ : Any = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
UpperCAmelCase_ : Tuple = sas_model.eval()
else:
UpperCAmelCase_ , UpperCAmelCase_ : Any = make_qa_sas_model(
model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowerCamelCase_ ( ) -> Dict:
if LOAD_DENSE_INDEX:
UpperCAmelCase_ : str = faiss.StandardGpuResources()
UpperCAmelCase_ : Tuple = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''' )['''train''']
UpperCAmelCase_ : Union[str, Any] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wikiaab_passages.num_rows, 128), )
UpperCAmelCase_ : Dict = faiss.IndexFlatIP(128 )
UpperCAmelCase_ : Dict = faiss.index_cpu_to_gpu(_lowerCAmelCase, 1, _lowerCAmelCase )
wikiaab_gpu_index_flat.add(_lowerCAmelCase ) # TODO fix for larger GPU
else:
UpperCAmelCase_ , UpperCAmelCase_ : str = (None, None)
UpperCAmelCase_ : Union[str, Any] = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowerCamelCase_ ( ) -> List[Any]:
UpperCAmelCase_ : List[Any] = datasets.load_dataset('''eli5''', name='''LFQA_reddit''' )
UpperCAmelCase_ : Any = elia['''train_eli5''']
UpperCAmelCase_ : Union[str, Any] = np.memmap(
'''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(elia_train.num_rows, 128) )
UpperCAmelCase_ : Union[str, Any] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCAmelCase )
return (elia_train, eli5_train_q_index)
snake_case_ ,snake_case_ ,snake_case_ : Optional[Any] = load_indexes()
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ : List[Any] = load_models()
snake_case_ ,snake_case_ : List[Any] = load_train_data()
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : int=10 ) -> List[Any]:
UpperCAmelCase_ : Any = embed_questions_for_retrieval([question], _lowerCAmelCase, _lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = eli5_train_q_index.search(_lowerCAmelCase, _lowerCAmelCase )
UpperCAmelCase_ : Tuple = [elia_train[int(_lowerCAmelCase )] for i in I[0]]
return nn_examples
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Optional[Any]="wiki40b", SCREAMING_SNAKE_CASE__ : str="dense", SCREAMING_SNAKE_CASE__ : List[str]=10 ) -> int:
if source == "none":
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
UpperCAmelCase_ , UpperCAmelCase_ : int = query_qa_dense_index(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
else:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = query_es_index(
_lowerCAmelCase, _lowerCAmelCase, index_name='''english_wiki40b_snippets_100w''', n_results=_lowerCAmelCase, )
UpperCAmelCase_ : Union[str, Any] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
UpperCAmelCase_ : Optional[Any] = '''question: {} context: {}'''.format(_lowerCAmelCase, _lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda SCREAMING_SNAKE_CASE__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda SCREAMING_SNAKE_CASE__ : None),
} )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Union[str, Any]=64, SCREAMING_SNAKE_CASE__ : List[Any]=256, SCREAMING_SNAKE_CASE__ : List[str]=False, SCREAMING_SNAKE_CASE__ : List[Any]=2, SCREAMING_SNAKE_CASE__ : Dict=0.95, SCREAMING_SNAKE_CASE__ : Optional[Any]=0.8 ) -> Any:
with torch.no_grad():
UpperCAmelCase_ : Dict = qa_sas_generate(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, num_answers=1, num_beams=_lowerCAmelCase, min_len=_lowerCAmelCase, max_len=_lowerCAmelCase, do_sample=_lowerCAmelCase, temp=_lowerCAmelCase, top_p=_lowerCAmelCase, top_k=_lowerCAmelCase, max_input_length=1024, device='''cuda:0''', )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
snake_case_ : Union[str, Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
snake_case_ : Union[str, Any] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
snake_case_ : Optional[int] = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
snake_case_ : List[Any] = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
snake_case_ : List[Any] = st.sidebar.checkbox("Demo options")
if demo_options:
snake_case_ : Optional[Any] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
snake_case_ : List[Any] = action_list.index(action_st)
snake_case_ : Any = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
snake_case_ : List[str] = show_type == "Show full text of passages"
else:
snake_case_ : Optional[int] = 3
snake_case_ : List[Any] = True
snake_case_ : Optional[int] = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
snake_case_ : Any = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
snake_case_ : str = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
snake_case_ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
snake_case_ : Any = "wiki40b"
snake_case_ : Optional[Any] = "dense"
snake_case_ : Dict = "beam"
snake_case_ : List[str] = 2
snake_case_ : Union[str, Any] = 64
snake_case_ : Any = 2_56
snake_case_ : str = None
snake_case_ : List[str] = None
snake_case_ : List[Any] = st.sidebar.checkbox("Generation options")
if generate_options:
snake_case_ : Optional[int] = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
snake_case_ : Optional[Any] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
snake_case_ : Any = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
snake_case_ : Optional[int] = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
snake_case_ : int = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
snake_case_ : Tuple = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
snake_case_ : Union[str, Any] = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
snake_case_ : List[str] = None
# start main text
snake_case_ : Optional[int] = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
snake_case_ : str = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
snake_case_ : str = st.text_input("Enter your question here:", "")
else:
snake_case_ : str = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
snake_case_ ,snake_case_ : Optional[Any] = make_support(question, source=wiki_source, method="dense", n_results=10)
snake_case_ ,snake_case_ : List[str] = make_support(question, source=wiki_source, method="sparse", n_results=10)
snake_case_ : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
snake_case_ : str = support_list[:10]
snake_case_ : Optional[Any] = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
snake_case_ ,snake_case_ : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
snake_case_ ,snake_case_ : int = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
snake_case_ : int = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
snake_case_ : List[str] = res[1].strip()
if sec_titles == "":
snake_case_ : List[Any] = "[{}]({})".format(res[0], wiki_url)
else:
snake_case_ : str = sec_titles.split(" & ")
snake_case_ : str = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
snake_case_ : str = find_nearest_training(question)
snake_case_ : Tuple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
snake_case_ : Dict = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
snake_case_ : Dict = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 701
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None)
snake_case_ : Optional[Any] = df.shape[:1][0]
# If you're using another dataset, select the target column here
snake_case_ : Any = df.iloc[:, 1:2]
snake_case_ : str = actual_data.values.reshape(len_data, 1)
snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data)
snake_case_ : List[str] = 10
snake_case_ : Any = 5
snake_case_ : Any = 20
snake_case_ : Tuple = len_data - periods * look_back
snake_case_ : str = actual_data[:division]
snake_case_ : Optional[int] = actual_data[division - look_back :]
snake_case_ ,snake_case_ : Any = [], []
snake_case_ ,snake_case_ : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
snake_case_ : Any = np.array(train_x)
snake_case_ : Optional[Any] = np.array(test_x)
snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y])
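# With look_back = 10 and forward_days = 5, each sample in x_train has shape (10, 1)
# and its target row in y_train holds the next 5 scaled values (shape (5,)).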
snake_case_ : List[Any] = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
snake_case_ : Dict = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
snake_case_ : Optional[Any] = model.predict(x_test)
| 644
| 0
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 50 ) -> int:
UpperCAmelCase_ : Union[str, Any] = [1] * (length + 1)
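    # ways_number[n] counts the ways to fill a row of length n with blocks of length >= 3,
    # each pair of blocks separated by at least one empty unit (Project Euler problem 114)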
for row_length in range(3, length + 1 ):
for block_length in range(3, row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 702
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
snake_case_ : Dict = "CompVis/stable-diffusion-v1-2"
snake_case_ : Any = "CompVis/stable-diffusion-v1-3"
snake_case_ : str = "CompVis/stable-diffusion-v1-4"
class __a (lowerCamelCase ):
def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str:
"""simple docstring"""
        super().__init__()
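        # Build one full pipeline per Stable Diffusion checkpoint (v1.1-v1.4) so their outputs can be compared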
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Tuple = StableDiffusionPipeline(
vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )}
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(__magic_name__ )
        # Check that the height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase_ : int = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 644
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Any = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
snake_case_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
snake_case_ : Optional[int] = 16
snake_case_ : Tuple = 32
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict:
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : Tuple = datasets.map(
SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE__ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' )
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCAmelCase_ : str = DataLoader(
tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any:
model.eval()
UpperCAmelCase_ : List[str] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE__ ) - 1:
UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : List[str] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
# Initialize accelerator
UpperCAmelCase_ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : int = config['''lr''']
UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] )
UpperCAmelCase_ : Optional[int] = int(config['''seed'''] )
UpperCAmelCase_ : List[str] = int(config['''batch_size'''] )
UpperCAmelCase_ : Optional[int] = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ )
# Instantiate optimizer
UpperCAmelCase_ : str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
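    # DummyOptim defers optimizer construction to DeepSpeed when the DeepSpeed config already defines one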
UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, )
else:
UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ : Union[str, Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' )
UpperCAmelCase_ : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase_ : List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1]
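        # Keep only the leading digits of the checkpoint suffix, then resume from the following epoch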
UpperCAmelCase_ : int = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1
UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''', optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f:
UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase_ : int = {}
for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = outputs.loss
UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase_ : Tuple = F"""epoch_{epoch}"""
UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = accuracy
UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0]
UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr''']
UpperCAmelCase_ : Tuple = epoch
UpperCAmelCase_ : Dict = overall_step
accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( ) -> List[str]:
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, )
parser.add_argument(
'''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
parser.add_argument(
'''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', )
parser.add_argument(
'''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', )
parser.add_argument(
'''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', )
UpperCAmelCase_ : Optional[int] = parser.parse_args()
UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 644
| 0
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
snake_case_ : int = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]:
for pegasus_name, hf_name in PATTERNS:
UpperCAmelCase_ : Optional[Any] = k.replace(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
return k
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = DEFAULTS.copy()
cfg_kwargs.update(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = PegasusConfig(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = PegasusForConditionalGeneration(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = torch_model.model.state_dict()
UpperCAmelCase_ : Union[str, Any] = {}
for k, v in tf_weights.items():
UpperCAmelCase_ : List[str] = rename_state_dict_key(__SCREAMING_SNAKE_CASE )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
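        # TF stores dense/projection kernels transposed relative to torch.nn.Linear weights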
if "dense" in k or "proj" in new_k:
UpperCAmelCase_ : Dict = v.T
UpperCAmelCase_ : Optional[Any] = torch.tensor(__SCREAMING_SNAKE_CASE, dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
UpperCAmelCase_ : Optional[int] = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
UpperCAmelCase_ : Union[str, Any] = mapping["shared.weight"]
UpperCAmelCase_ : Optional[int] = mapping["shared.weight"]
UpperCAmelCase_ : Optional[int] = {k: torch.zeros_like(__SCREAMING_SNAKE_CASE ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = torch_model.model.load_state_dict(__SCREAMING_SNAKE_CASE, strict=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Optional[Any]:
UpperCAmelCase_ : Any = tf.train.list_variables(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Optional[int] = ["Adafactor", "global_step"]
for name, shape in tqdm(__SCREAMING_SNAKE_CASE, desc='''converting tf checkpoint to dict''' ):
UpperCAmelCase_ : Optional[int] = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase_ : Dict = tf.train.load_variable(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = array
return tf_weights
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ).parent.name
UpperCAmelCase_ : Optional[Any] = task_specific_params[F"""summarization_{dataset}"""]["max_position_embeddings"]
UpperCAmelCase_ : int = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''', model_max_length=__SCREAMING_SNAKE_CASE )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__SCREAMING_SNAKE_CASE )
# convert model
UpperCAmelCase_ : Tuple = get_tf_weights_as_numpy(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
UpperCAmelCase_ : Optional[int] = task_specific_params
UpperCAmelCase_ : Optional[int] = convert_pegasus(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
torch_model.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(__SCREAMING_SNAKE_CASE, Path(__SCREAMING_SNAKE_CASE ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
snake_case_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
snake_case_ : Tuple = parser.parse_args()
if args.save_dir is None:
snake_case_ : Any = Path(args.tf_ckpt_path).parent.name
snake_case_ : str = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 704
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]:
UpperCAmelCase_ : int = []
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return [nums.copy()]
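    # Rotate through nums: pop the head, permute the remainder, append the head to each permutation, then restore it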
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : List[Any] = nums.pop(0 )
UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ )
for perm in permutations:
perm.append(SCREAMING_SNAKE_CASE__ )
result.extend(SCREAMING_SNAKE_CASE__ )
nums.append(SCREAMING_SNAKE_CASE__ )
return result
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if start == len(SCREAMING_SNAKE_CASE__ ) - 1:
output.append(nums[:] )
else:
for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start]
backtrack(start + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack
UpperCAmelCase_ : Optional[int] = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
    # use res to print the output of the permute2 function (the second definition above)
    snake_case_ : Tuple = lowerCamelCase_([1, 2, 3])
print(res)
doctest.testmod()
| 644
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : int = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class __a (_UpperCAmelCase ):
__a : str = '''switch_transformers'''
__a : Any = ['''past_key_values''']
__a : Optional[int] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Optional[int] , __magic_name__ : List[str]=3_21_28 , __magic_name__ : Tuple=7_68 , __magic_name__ : List[str]=64 , __magic_name__ : Optional[Any]=20_48 , __magic_name__ : Dict=64 , __magic_name__ : Optional[int]=12 , __magic_name__ : int=3 , __magic_name__ : Tuple=12 , __magic_name__ : List[str]=3 , __magic_name__ : List[str]=12 , __magic_name__ : List[Any]=8 , __magic_name__ : List[str]=False , __magic_name__ : Dict=0.0_1 , __magic_name__ : Dict="float32" , __magic_name__ : int=False , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Optional[int]=1_28 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : Any=1E-6 , __magic_name__ : int=0.0_0_1 , __magic_name__ : Union[str, Any]=0.0_0_1 , __magic_name__ : Dict=1.0 , __magic_name__ : List[str]="relu" , __magic_name__ : List[str]=True , __magic_name__ : Optional[Any]=False , __magic_name__ : Union[str, Any]=True , __magic_name__ : Dict=0 , __magic_name__ : List[Any]=1 , **__magic_name__ : Dict , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : List[Any] = d_model
UpperCAmelCase_ : Optional[Any] = d_kv
UpperCAmelCase_ : str = d_ff
UpperCAmelCase_ : List[Any] = num_sparse_encoder_layers
UpperCAmelCase_ : List[Any] = num_layers
UpperCAmelCase_ : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase_ : int = num_sparse_decoder_layers
        # This tells us how often (every how many layers) an encoder layer is sparse.
if self.num_sparse_encoder_layers > 0:
UpperCAmelCase_ : Optional[int] = self.num_layers // self.num_sparse_encoder_layers
else:
UpperCAmelCase_ : List[Any] = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us how often (every how many layers) a decoder layer is sparse.
if self.num_sparse_decoder_layers > 0:
UpperCAmelCase_ : Optional[int] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
UpperCAmelCase_ : Any = self.num_decoder_layers # HACK: this will create 0 sparse layers
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : Optional[int] = num_experts
UpperCAmelCase_ : Dict = expert_capacity
UpperCAmelCase_ : Dict = router_bias
UpperCAmelCase_ : Optional[int] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
UpperCAmelCase_ : List[str] = router_dtype
UpperCAmelCase_ : List[str] = router_ignore_padding_tokens
UpperCAmelCase_ : int = relative_attention_num_buckets
UpperCAmelCase_ : str = relative_attention_max_distance
UpperCAmelCase_ : Dict = dropout_rate
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : Union[str, Any] = initializer_factor
UpperCAmelCase_ : List[str] = feed_forward_proj
UpperCAmelCase_ : int = use_cache
UpperCAmelCase_ : int = add_router_probs
UpperCAmelCase_ : Optional[int] = router_z_loss_coef
UpperCAmelCase_ : List[Any] = router_aux_loss_coef
UpperCAmelCase_ : Optional[int] = self.feed_forward_proj.split('''-''' )
UpperCAmelCase_ : List[str] = act_info[-1]
UpperCAmelCase_ : str = act_info[0] == '''gated'''
if len(A_ ) > 1 and act_info[0] != "gated" or len(A_ ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCAmelCase_ : Dict = '''gelu_new'''
super().__init__(
pad_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , **A_ , )
| 705
|
'''simple docstring'''
class __a :
def __init__( self : List[Any] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = size
UpperCAmelCase_ : Tuple = [0] * size
UpperCAmelCase_ : Optional[Any] = [0] * size
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = value
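        # Walk forward through every tree node whose segment contains index and refresh its stored maximum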
while index < self.size:
UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1
if current_left_border == index:
UpperCAmelCase_ : List[str] = value
else:
UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
right -= 1 # Because of right is exclusive
UpperCAmelCase_ : List[str] = 0
while left <= right:
UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ )
if left <= current_left:
UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] )
UpperCAmelCase_ : Optional[Any] = current_left
else:
UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Sequence[int] | None = None ) -> int:
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
UpperCAmelCase_ : int = nums[0]
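    # Kadane's algorithm: at each step either extend the running subarray sum or restart at the current element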
for i in range(1, len(_lowerCamelCase ) ):
UpperCAmelCase_ : str = nums[i]
UpperCAmelCase_ : Tuple = max(_lowerCamelCase, ans + num, _lowerCamelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
snake_case_ : Union[str, Any] = int(input("Enter number of elements : ").strip())
snake_case_ : Union[str, Any] = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 706
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : List[str] = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : Tuple = scope
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : str = None
if self.use_token_type_ids:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# create attention mask
UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
UpperCAmelCase_ : Any = self.seq_length // 2
UpperCAmelCase_ : Tuple = 0
# first forward pass
UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCAmelCase_ : str = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , )
# get two different outputs
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
# select random slice
UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval()
UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
# first forward pass
UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[
'''last_hidden_state'''
]
# select random slice
UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ )
model.to(__magic_name__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = BioGptModel(__magic_name__ )
UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = config_and_inputs
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
__a : Union[str, Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : List[str] = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = BioGptModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : str = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : Tuple = '''left'''
        # Define PAD token = EOS token (BioGPT has no dedicated pad token)
UpperCAmelCase_ : List[Any] = tokenizer.eos_token
UpperCAmelCase_ : List[Any] = model.config.eos_token_id
# use different length sentences to test batching
UpperCAmelCase_ : Tuple = [
'''Hello, my dog is a little''',
'''Today, I''',
]
UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ )
UpperCAmelCase_ : Any = model.generate(
input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , )
UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ )
UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings )
UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : Tuple = input_dict['''input_ids''']
UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = 3
UpperCAmelCase_ : Optional[int] = '''multi_label_classification'''
UpperCAmelCase_ : int = input_dict['''input_ids''']
UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ )
UpperCAmelCase_ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __a (unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
UpperCAmelCase_ : str = model(__magic_name__ )[0]
UpperCAmelCase_ : Optional[int] = 4_23_84
UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __magic_name__ )
UpperCAmelCase_ : List[Any] = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__magic_name__ )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ )
UpperCAmelCase_ : Optional[int] = model.generate(
**__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , )
UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__magic_name__ , __magic_name__ )
| 644
| 0
|
'''simple docstring'''
class EditDistance:
    def __init__(self) -> None:
        """simple docstring"""
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        """simple docstring"""
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        """simple docstring"""
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        """simple docstring"""
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 707
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __a (lowerCamelCase , unittest.TestCase ):
__a : List[str] = BlenderbotSmallTokenizer
__a : List[Any] = False
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = '''adapt act apte'''
UpperCAmelCase_ : Tuple = '''adapt act apte'''
return input_text, output_text
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : List[Any] = '''adapt act apte'''
UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
UpperCAmelCase_ : Optional[int] = '''I am a small frog.'''
UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
UpperCAmelCase_ : List[Any] = '''I am a small frog .'''
UpperCAmelCase_ : Any = '''.'''
UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
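# Round-trip sketch for the tokenizer above (requires downloading the
# checkpoint from the Hugging Face Hub; output mirrors the assertion above):
# tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
# tok.batch_decode(tok(["I am a small frog."]).input_ids, skip_special_tokens=True)
# # -> ["i am a small frog ."]  (lowercasing makes the round-trip lossy)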
| 644
| 0
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
snake_case_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class __a (A_ ):
def __init__( self : Optional[int] , *__magic_name__ : List[str] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*__magic_name__ , **__magic_name__ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=None ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Union[str, Any] = {}
if prompt is not None:
UpperCAmelCase_ : List[str] = prompt
if generate_kwargs is not None:
UpperCAmelCase_ : int = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
UpperCAmelCase_ : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
UpperCAmelCase_ : List[str] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : str , __magic_name__ : int , **__magic_name__ : Any ) -> List[Any]:
"""simple docstring"""
return super().__call__(__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : str=None ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = load_image(__magic_name__ )
if prompt is not None:
if not isinstance(__magic_name__ , __magic_name__ ):
raise ValueError(
F"""Received an invalid text input, got - {type(__magic_name__ )} - but expected a single string. """
'''Note also that one single text can be provided for conditional image to text generation.''' )
UpperCAmelCase_ : int = self.model.config.model_type
if model_type == "git":
UpperCAmelCase_ : List[str] = self.image_processor(images=__magic_name__ , return_tensors=self.framework )
UpperCAmelCase_ : Any = self.tokenizer(text=__magic_name__ , add_special_tokens=__magic_name__ ).input_ids
UpperCAmelCase_ : List[str] = [self.tokenizer.cls_token_id] + input_ids
UpperCAmelCase_ : Optional[Any] = torch.tensor(__magic_name__ ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
UpperCAmelCase_ : Any = self.image_processor(images=__magic_name__ , header_text=__magic_name__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
UpperCAmelCase_ : str = self.image_processor(images=__magic_name__ , return_tensors=self.framework )
UpperCAmelCase_ : Dict = self.tokenizer(__magic_name__ , return_tensors=self.framework )
model_inputs.update(__magic_name__ )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
UpperCAmelCase_ : List[Any] = self.image_processor(images=__magic_name__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
UpperCAmelCase_ : Optional[Any] = None
return model_inputs
def UpperCAmelCase__ ( self : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int]=None ) -> str:
"""simple docstring"""
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , __magic_name__ )
and all(x is None for x in model_inputs['''input_ids'''] )
):
UpperCAmelCase_ : Optional[Any] = None
if generate_kwargs is None:
UpperCAmelCase_ : int = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
UpperCAmelCase_ : List[str] = model_inputs.pop(self.model.main_input_name )
UpperCAmelCase_ : Union[str, Any] = self.model.generate(__magic_name__ , **__magic_name__ , **__magic_name__ )
return model_outputs
def UpperCAmelCase__ ( self : Any , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = []
for output_ids in model_outputs:
UpperCAmelCase_ : int = {
"""generated_text""": self.tokenizer.decode(
__magic_name__ , skip_special_tokens=__magic_name__ , )
}
records.append(__magic_name__ )
return records
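# Usage sketch for the pipeline above (checkpoint and output text are
# illustrative, not taken from the source):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
# captioner("cat.png")  # -> [{"generated_text": "a cat sitting on a couch"}]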
| 708
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self) -> None:
        """simple docstring"""
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self) -> None:
        """simple docstring"""
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self) -> None:
        """simple docstring"""
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self) -> None:
        """simple docstring"""
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
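# Why these assertions hold: SiLU(x) = x * sigmoid(x), Mish(x) = x * tanh(softplus(x))
# and GELU(x) = x * Phi(x) all saturate for large |x|, so in float32 the extreme
# inputs land exactly on 0 or x. A standalone check of the SiLU identity:
import torch

x = torch.tensor(20.0)
assert torch.isclose(x * torch.sigmoid(x), torch.nn.functional.silu(x))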
| 644
| 0
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
snake_case_ : Optional[int] = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
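# The staleness thresholds above, captured as a pure helper for readability
# (an illustrative addition, not part of the original script):
def is_candidate_for_stale_comment(days_since_update: int, days_since_creation: int) -> bool:
    return days_since_update > 23 and days_since_creation >= 30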
| 709
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __a (lowerCamelCase ):
__a : Tuple = ["pixel_values"]
def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : Tuple = do_rescale
UpperCAmelCase_ : List[Any] = size_divisor
UpperCAmelCase_ : Any = resample
super().__init__(**__magic_name__ )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCAmelCase_ : Dict = height // size_divisor * size_divisor
UpperCAmelCase_ : Dict = width // size_divisor * size_divisor
UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
return image
def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor
UpperCAmelCase_ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images]
if do_resize:
UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images]
UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
UpperCAmelCase_ : int = {'''pixel_values''': images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
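# Worked example of the rounding in resize() above: with size_divisor=32 an
# input of height 521 and width 479 is floored to 521 // 32 * 32 = 512 and
# 479 // 32 * 32 = 448 before the optional 1/255 rescale.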
| 644
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a (unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.dummy_uncond_unet
UpperCAmelCase_ : Union[str, Any] = PNDMScheduler()
UpperCAmelCase_ : Optional[int] = PNDMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pndm.to(lowerCamelCase_ )
pndm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Dict = pndm(generator=lowerCamelCase_ , num_inference_steps=20 , output_type='''numpy''' ).images
UpperCAmelCase_ : List[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = pndm(generator=lowerCamelCase_ , num_inference_steps=20 , output_type='''numpy''' , return_dict=lowerCamelCase_ )[0]
UpperCAmelCase_ : str = image[0, -3:, -3:, -1]
UpperCAmelCase_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Optional[int] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = '''google/ddpm-cifar10-32'''
UpperCAmelCase_ : Optional[int] = UNetaDModel.from_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = PNDMScheduler()
UpperCAmelCase_ : str = PNDMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pndm.to(lowerCamelCase_ )
pndm.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = pndm(generator=lowerCamelCase_ , output_type='''numpy''' ).images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Any = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 710
|
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
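# Project Euler 63 asks how many n-digit positive integers are also an n-th
# power. Example: 16807 = 7**5 is itself a 5-digit number. Bases >= 10 never
# qualify, since 10**n already has n + 1 digits, which is why max_base is 10.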
| 644
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """simple docstring"""
        return self.pre_processor(text=[label], images=[image], padding="max_length", return_tensors="pt")

    def forward(self, inputs):
        """simple docstring"""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """simple docstring"""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
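# Usage sketch (hedged; assumes the Transformers agents tool call interface):
# tool = ImageSegmentationTool()
# mask = tool(image, "cat")  # black/white PIL mask of the pixels matching "cat"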
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class VisualQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """simple docstring"""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """simple docstring"""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """simple docstring"""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 644
| 0
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {"vocab_file": "spiece.model"}
snake_case_ : str = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
snake_case_ : Optional[Any] = {
"albert-base-v1": 5_12,
"albert-large-v1": 5_12,
"albert-xlarge-v1": 5_12,
"albert-xxlarge-v1": 5_12,
"albert-base-v2": 5_12,
"albert-large-v2": 5_12,
"albert-xlarge-v2": 5_12,
"albert-xxlarge-v2": 5_12,
}
snake_case_ : Union[str, Any] = "▁"
class __a (__UpperCAmelCase ):
__a : List[Any] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , __magic_name__ : Optional[int] , __magic_name__ : Tuple=True , __magic_name__ : List[str]=True , __magic_name__ : Union[str, Any]=False , __magic_name__ : str="[CLS]" , __magic_name__ : int="[SEP]" , __magic_name__ : Dict="<unk>" , __magic_name__ : Dict="[SEP]" , __magic_name__ : Any="<pad>" , __magic_name__ : Optional[int]="[CLS]" , __magic_name__ : Any="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : str , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = (
AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ , normalized=lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
else mask_token
)
UpperCAmelCase_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
UpperCAmelCase_ : int = do_lower_case
UpperCAmelCase_ : List[Any] = remove_space
UpperCAmelCase_ : List[Any] = keep_accents
UpperCAmelCase_ : List[Any] = vocab_file
UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return len(self.sp_model )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase_ : Optional[int] = None
return state
def __setstate__( self : Optional[Any] , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase_ : str = {}
UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if self.remove_space:
UpperCAmelCase_ : Dict = ''' '''.join(inputs.strip().split() )
else:
UpperCAmelCase_ : Union[str, Any] = inputs
UpperCAmelCase_ : int = outputs.replace('''``''' , '''\"''' ).replace('''\'\'''' , '''\"''' )
if not self.keep_accents:
UpperCAmelCase_ : Dict = unicodedata.normalize('''NFKD''' , lowerCAmelCase_ )
UpperCAmelCase_ : str = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCAmelCase_ )] )
if self.do_lower_case:
UpperCAmelCase_ : Dict = outputs.lower()
return outputs
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : int = self.preprocess_text(lowerCAmelCase_ )
UpperCAmelCase_ : Any = self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
UpperCAmelCase_ : Any = []
for piece in pieces:
if len(lowerCAmelCase_ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
UpperCAmelCase_ : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase_ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCAmelCase_ : Optional[int] = cur_pieces[1:]
else:
UpperCAmelCase_ : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCAmelCase_ )
else:
new_pieces.append(lowerCAmelCase_ )
return new_pieces
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : int ) -> List[Any]:
"""simple docstring"""
return self.sp_model.PieceToId(lowerCAmelCase_ )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Dict:
"""simple docstring"""
return self.sp_model.IdToPiece(lowerCAmelCase_ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Tuple = ''''''
UpperCAmelCase_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : int = []
else:
current_sub_tokens.append(lowerCAmelCase_ )
UpperCAmelCase_ : Any = False
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = [self.sep_token_id]
UpperCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> str:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> List[Any]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : Tuple = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , '''wb''' ) as fi:
UpperCAmelCase_ : Any = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
| 712
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None) -> None:
        """simple docstring"""
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        """simple docstring"""
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        """simple docstring"""
        self.root = root

    def __str__(self) -> str:
        """simple docstring"""
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        """simple docstring"""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        """simple docstring"""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        """simple docstring"""
        return self.root is None

    def __insert(self, value) -> None:
        """simple docstring"""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        """simple docstring"""
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        """simple docstring"""
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        """simple docstring"""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        """simple docstring"""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        """simple docstring"""
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        """simple docstring"""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        """simple docstring"""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        """simple docstring"""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        """simple docstring"""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
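# Example of find_kth_smallest() above (an illustrative addition):
if __name__ == "__main__":
    demo_tree = BinarySearchTree()
    demo_tree.insert(8, 3, 6, 1, 10)
    assert demo_tree.find_kth_smallest(2, demo_tree.root) == 3  # in-order: 1, 3, 6, 8, 10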
| 644
| 0
|
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
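# This is Project Euler 39: among perimeters p <= 1000, p = 840 admits the most
# integer right triangles, so solution() returns 840.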
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
| 713
|
'''simple docstring'''
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
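# Each call draws one triangle and recurses three times, so depth d draws
# (3 ** (d + 1) - 1) / 2 triangles in total, e.g. depth 3 draws 40.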
| 644
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=_a )
class __a (_a ):
__a : Dict = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
__a : Optional[Any] = Features({"question": Value("string" ), "context": Value("string" )} )
__a : List[Any] = Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
__a : str = "question"
__a : Union[str, Any] = "context"
__a : Tuple = "answers"
@property
def UpperCAmelCase__ ( self : int ) -> int:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
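# Usage sketch (hedged; assumes the datasets task API, which recent releases
# deprecate): ds.prepare_for_task("question-answering-extractive") remaps the
# configured question/context/answers columns onto the canonical schema above.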
| 714
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
snake_case_ : List[str] = False
class __a (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__magic_name__ )
UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Any = generator.manual_seed(0 )
UpperCAmelCase_ : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077'''
UpperCAmelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe.dual_guided(
prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger '''
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = pipe.text_to_image(
prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 644
| 0
|
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
snake_case_ : Union[str, Any] = logging.getLogger(__name__)
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str ) -> int:
UpperCAmelCase_ : Optional[Any] = git.Repo(search_parent_directories=UpperCAmelCase__ )
UpperCAmelCase_ : List[Any] = {
'''repo_id''': str(UpperCAmelCase__ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(UpperCAmelCase__, '''git_log.json''' ), '''w''' ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__, indent=4 )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str ) -> str:
if params.n_gpu <= 0:
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : Any = -1
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : List[str] = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
UpperCAmelCase_ : List[str] = int(os.environ['''WORLD_SIZE'''] )
UpperCAmelCase_ : Any = int(os.environ['''N_GPU_NODE'''] )
UpperCAmelCase_ : List[str] = int(os.environ['''RANK'''] )
# number of nodes / node ID
UpperCAmelCase_ : Optional[int] = params.world_size // params.n_gpu_per_node
UpperCAmelCase_ : str = params.global_rank // params.n_gpu_per_node
UpperCAmelCase_ : Tuple = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
UpperCAmelCase_ : Optional[Any] = 1
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Optional[Any] = 1
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : List[str] = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
UpperCAmelCase_ : str = params.node_id == 0 and params.local_rank == 0
UpperCAmelCase_ : Optional[int] = params.n_nodes > 1
# summary
UpperCAmelCase_ : Union[str, Any] = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''', backend='''nccl''', )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 715
|
'''simple docstring'''
snake_case_ : int = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 644
| 0
|
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
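# An automorphic number's square ends in the number itself:
# 5**2 = 25, 6**2 = 36, 76**2 = 5776, 376**2 = 141376 are all automorphic.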
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a (unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet
UpperCAmelCase_ : Dict = KarrasVeScheduler()
UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0]
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = KarrasVeScheduler()
UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 644
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
class __a :
def __init__( self : int , __magic_name__ : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = size
# approximate the overall size of the segment tree for the given input size
UpperCAmelCase_ : Optional[Any] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
UpperCAmelCase_ : List[Any] = [0 for i in range(0 , 4 * size )]
UpperCAmelCase_ : Any = [0 for i in range(0 , 4 * size )] # flag for lazy update
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return idx * 2
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Tuple ) -> str:
"""simple docstring"""
return idx * 2 + 1
def UpperCAmelCase__ ( self : str , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Any ) -> Dict:
"""simple docstring"""
if left_element == right_element:
UpperCAmelCase_ : int = a[left_element - 1]
else:
UpperCAmelCase_ : List[Any] = (left_element + right_element) // 2
self.build(self.left(__magic_name__ ) , __magic_name__ , __magic_name__ , __magic_name__ )
self.build(self.right(__magic_name__ ) , mid + 1 , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : str = max(
self.segment_tree[self.left(__magic_name__ )] , self.segment_tree[self.right(__magic_name__ )] )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> List[str]:
"""simple docstring"""
if self.flag[idx] is True:
UpperCAmelCase_ : List[str] = self.lazy[idx]
UpperCAmelCase_ : Any = False
if left_element != right_element:
UpperCAmelCase_ : List[Any] = self.lazy[idx]
UpperCAmelCase_ : Any = self.lazy[idx]
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Any = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
UpperCAmelCase_ : Union[str, Any] = val
if left_element != right_element:
UpperCAmelCase_ : Union[str, Any] = val
UpperCAmelCase_ : List[Any] = val
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Dict = True
return True
UpperCAmelCase_ : List[Any] = (left_element + right_element) // 2
self.update(self.left(__magic_name__ ) , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
self.update(self.right(__magic_name__ ) , mid + 1 , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Optional[Any] = max(
self.segment_tree[self.left(__magic_name__ )] , self.segment_tree[self.right(__magic_name__ )] )
return True
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
if self.flag[idx] is True:
UpperCAmelCase_ : Optional[int] = self.lazy[idx]
UpperCAmelCase_ : List[str] = False
if left_element != right_element:
UpperCAmelCase_ : Dict = self.lazy[idx]
UpperCAmelCase_ : Union[str, Any] = self.lazy[idx]
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : List[Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
UpperCAmelCase_ : List[str] = (left_element + right_element) // 2
UpperCAmelCase_ : Dict = self.query(self.left(__magic_name__ ) , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Union[str, Any] = self.query(self.right(__magic_name__ ) , mid + 1 , __magic_name__ , __magic_name__ , __magic_name__ )
return max(__magic_name__ , __magic_name__ )
def __str__( self : List[str] ) -> List[Any]:
"""simple docstring"""
return str([self.query(1 , 1 , self.size , __magic_name__ , __magic_name__ ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
snake_case_ : str = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
snake_case_ : str = 15
snake_case_ : Union[str, Any] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt)
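# Brute-force cross-check for the lazy segment tree above: the same range
# "assign" updates and range-max queries on a plain list, O(n) per operation
# instead of O(log n). Handy for validating the tree; the class name is
# illustrative.
class NaiveRangeMax:
    def __init__(self, data):
        self.data = list(data)

    def update(self, left, right, value):  # assign `value` on [left, right], 1-based
        for i in range(left - 1, right):
            self.data[i] = value

    def query(self, left, right):  # max on [left, right], 1-based
        return max(self.data[left - 1 : right])

ref = NaiveRangeMax([1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8])
assert ref.query(4, 6) == 7
ref.update(1, 3, 111)
assert ref.query(1, 15) == 111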
| 717
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __a (lowerCamelCase ):
__a : List[Any] = "openai/whisper-base"
__a : Optional[Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__a : Any = "transcriber"
__a : str = WhisperProcessor
__a : List[Any] = WhisperForConditionalGeneration
__a : int = ["audio"]
__a : Optional[Any] = ["text"]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
return self.model.generate(inputs=__magic_name__ )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
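# Hedged sketch of the three-stage flow the tool above wraps: preprocess the
# waveform into log-mel features, generate token ids, decode to text. The
# silent dummy waveform is a placeholder so the snippet runs end to end.
import numpy as np
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio_array = np.zeros(16_000, dtype=np.float32)  # one second of silence
features = processor(audio_array, sampling_rate=16_000, return_tensors="pt").input_features
generated_ids = model.generate(inputs=features)
text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]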
| 644
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case_ : Optional[int] = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
snake_case_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
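# Minimal sketch of the lazy-import pattern used above: attributes listed in
# an import structure are resolved on first access rather than at import
# time, so heavy backends (torch, tf) are only imported when actually used.
# This mirrors the idea behind transformers' _LazyModule; names here are
# illustrative, not the real implementation.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)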
| 718
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
return abs(SCREAMING_SNAKE_CASE__ ) if a == 0 else greatest_common_divisor(b % a, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
while y: # when y becomes 0 the loop terminates and x is returned as the final GCD.
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = y, x % y
return abs(SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( ) -> Optional[int]:
try:
UpperCAmelCase_ : Optional[Any] = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
UpperCAmelCase_ : Optional[int] = int(nums[0] )
UpperCAmelCase_ : List[Any] = int(nums[1] )
print(
F"""greatest_common_divisor({num_a}, {num_a}) = """
F"""{greatest_common_divisor(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''' )
if __name__ == "__main__":
main()
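# Sanity-check sketch for the two GCD strategies above, compared against
# math.gcd as a reference. Plain re-implementations with illustrative names.
import math

def gcd_recursive(a: int, b: int) -> int:
    return abs(b) if a == 0 else gcd_recursive(b % a, a)

def gcd_iterative(x: int, y: int) -> int:
    while y:
        x, y = y, x % y
    return abs(x)

for a, b in [(0, 5), (12, 18), (-4, 6), (7, 13)]:
    assert gcd_recursive(a, b) == gcd_iterative(a, b) == math.gcd(abs(a), abs(b))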
| 644
| 0
|
'''simple docstring'''
from typing import List
import numpy as np
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : dict ) -> int:
UpperCAmelCase_ : Tuple = {key: len(snake_case_ ) for key, value in gen_kwargs.items() if isinstance(snake_case_, snake_case_ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
UpperCAmelCase_ : List[Any] = max(lists_lengths.values(), default=0 )
return max(1, snake_case_ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> Any:
UpperCAmelCase_ : List[Any] = []
for group_idx in range(snake_case_ ):
UpperCAmelCase_ : int = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
UpperCAmelCase_ : str = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
UpperCAmelCase_ : List[Any] = range(snake_case_, start + num_shards_to_add )
shards_indices_per_group.append(snake_case_ )
return shards_indices_per_group
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : dict, SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
UpperCAmelCase_ : Any = _number_of_shards_in_gen_kwargs(snake_case_ )
if num_shards == 1:
return [dict(snake_case_ )]
else:
UpperCAmelCase_ : Optional[Any] = _distribute_shards(num_shards=snake_case_, max_num_jobs=snake_case_ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(snake_case_, snake_case_ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(snake_case_ ) )
]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[dict] ) -> Union[str, Any]:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key], snake_case_ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : np.random.Generator, SCREAMING_SNAKE_CASE__ : dict ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = {len(snake_case_ ) for value in gen_kwargs.values() if isinstance(snake_case_, snake_case_ )}
UpperCAmelCase_ : Tuple = {}
for size in list_sizes:
UpperCAmelCase_ : int = list(range(snake_case_ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
UpperCAmelCase_ : int = dict(snake_case_ )
for key, value in shuffled_kwargs.items():
if isinstance(snake_case_, snake_case_ ):
UpperCAmelCase_ : List[Any] = [value[i] for i in indices_per_size[len(snake_case_ )]]
return shuffled_kwargs
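# Worked example of the shard-distribution scheme implemented above:
# `num_shards` shards are split into at most `max_num_jobs` contiguous
# groups, with the remainder front-loaded onto the first groups. This is a
# standalone restatement with illustrative names.
def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    groups, start = [], 0
    for group_idx in range(max_num_jobs):
        count = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if count == 0:
            break
        groups.append(range(start, start + count))
        start += count
    return groups

assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards_sketch(2, 5) == [range(0, 1), range(1, 2)]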
| 719
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : List[str] = use_input_mask
UpperCAmelCase_ : Any = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Dict = num_labels
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = range_bbox
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ : List[str] = bbox[i, j, 3]
UpperCAmelCase_ : Dict = bbox[i, j, 1]
UpperCAmelCase_ : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ : List[str] = bbox[i, j, 2]
UpperCAmelCase_ : Tuple = bbox[i, j, 0]
UpperCAmelCase_ : Union[str, Any] = t
UpperCAmelCase_ : int = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ )
UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : Tuple = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a : Any = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Union[str, Any] = False
__a : int = False
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str:
"""simple docstring"""
return True
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = LiltModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : Tuple = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@slow
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@slow
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ )
UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ )
UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ )
UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] )
UpperCAmelCase_ : List[str] = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , )
self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
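# Hedged sketch of the bbox "legalization" step the tester above performs by
# hand: boxes are (x0, y0, x1, y1) and coordinates get swapped so that
# x0 <= x1 and y0 <= y1, as LiLT expects. Vectorized restatement with an
# illustrative name; assumes torch is available.
import torch

def legalize_bboxes(bbox: torch.Tensor) -> torch.Tensor:
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)

boxes = torch.tensor([[[5, 6, 1, 2]]])
assert legalize_bboxes(boxes).tolist() == [[[1, 2, 5, 6]]]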
| 644
| 0
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __a (lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
__a : List[Any] = StableDiffusionControlNetImgaImgPipeline
__a : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
__a : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
__a : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
UpperCAmelCase_ : List[str] = CLIPTextModel(__magic_name__ )
UpperCAmelCase_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Union[str, Any] = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any]=0 ) -> List[str]:
"""simple docstring"""
if str(__magic_name__ ).startswith('''mps''' ):
UpperCAmelCase_ : int = torch.manual_seed(__magic_name__ )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCAmelCase_ : Tuple = 2
UpperCAmelCase_ : List[Any] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__magic_name__ , device=torch.device(__magic_name__ ) , )
UpperCAmelCase_ : List[Any] = floats_tensor(control_image.shape , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Any = Image.fromarray(np.uinta(__magic_name__ ) ).convert('''RGB''' ).resize((64, 64) )
UpperCAmelCase_ : Optional[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __a (lowercase_ , lowercase_ , unittest.TestCase ):
__a : Optional[int] = StableDiffusionControlNetImgaImgPipeline
__a : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
__a : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a : Optional[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(__magic_name__ : Optional[int] ):
if isinstance(__magic_name__ , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
UpperCAmelCase_ : List[str] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__magic_name__ )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__magic_name__ )
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
UpperCAmelCase_ : Any = CLIPTextModel(__magic_name__ )
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : List[str] = MultiControlNetModel([controlneta, controlneta] )
UpperCAmelCase_ : Tuple = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any , __magic_name__ : Optional[int]=0 ) -> Optional[int]:
"""simple docstring"""
if str(__magic_name__ ).startswith('''mps''' ):
UpperCAmelCase_ : Dict = torch.manual_seed(__magic_name__ )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCAmelCase_ : List[Any] = 2
UpperCAmelCase_ : Union[str, Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__magic_name__ , device=torch.device(__magic_name__ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__magic_name__ , device=torch.device(__magic_name__ ) , ),
]
UpperCAmelCase_ : List[Any] = floats_tensor(control_image[0].shape , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
UpperCAmelCase_ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : List[str] = Image.fromarray(np.uinta(__magic_name__ ) ).convert('''RGB''' ).resize((64, 64) )
UpperCAmelCase_ : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : str = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
UpperCAmelCase_ : List[str] = 1_0.0
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(__magic_name__ )
UpperCAmelCase_ : List[str] = steps
UpperCAmelCase_ : Optional[Any] = scale
UpperCAmelCase_ : List[Any] = pipe(**__magic_name__ )[0]
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(__magic_name__ )
UpperCAmelCase_ : List[Any] = steps
UpperCAmelCase_ : Dict = scale
UpperCAmelCase_ : List[Any] = pipe(**__magic_name__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
UpperCAmelCase_ : List[Any] = self.get_dummy_inputs(__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = steps
UpperCAmelCase_ : Optional[Any] = scale
UpperCAmelCase_ : str = pipe(**__magic_name__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
UpperCAmelCase_ : Any = self.get_dummy_inputs(__magic_name__ )
UpperCAmelCase_ : List[str] = steps
UpperCAmelCase_ : List[Any] = scale
UpperCAmelCase_ : Tuple = pipe(**__magic_name__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def UpperCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : str = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__magic_name__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
UpperCAmelCase_ : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=__magic_name__ , controlnet=__magic_name__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase_ : Optional[int] = '''evil space-punk bird'''
UpperCAmelCase_ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((5_12, 5_12) )
UpperCAmelCase_ : Optional[int] = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((5_12, 5_12) )
UpperCAmelCase_ : str = pipe(
__magic_name__ , __magic_name__ , control_image=__magic_name__ , generator=__magic_name__ , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
UpperCAmelCase_ : List[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
UpperCAmelCase_ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
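# Hedged usage sketch mirroring the slow test above: canny-conditioned
# img2img with Stable Diffusion 1.5. Class and model names follow current
# diffusers conventions (the pipeline class spelling may differ across
# versions); downloads and inference are heavyweight.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
)
pipe.enable_model_cpu_offload()

image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))
control = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))
result = pipe(
    "evil space-punk bird",
    image,
    control_image=control,
    num_inference_steps=50,
    strength=0.6,
    generator=torch.Generator(device="cpu").manual_seed(0),
).images[0]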
| 720
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : int = "▁"
snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
snake_case_ : int = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
snake_case_ : Optional[Any] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
snake_case_ : Dict = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
snake_case_ : Any = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __a (lowerCamelCase ):
__a : List[str] = ["input_ids"]
__a : Union[str, Any] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_INIT_CONFIGURATION
__a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__a : Union[str, Any] = RESOURCE_FILES_NAMES
def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None:
"""simple docstring"""
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text, so there should be a match in a non-normalized sentence.
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
UpperCAmelCase_ : Optional[Any] = do_lower_case
UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt
UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__magic_name__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ )
else:
UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any:
"""simple docstring"""
if text is None:
return None
UpperCAmelCase_ : str = self.tokenize(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', []
for i, ch in enumerate(__magic_name__ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ )
else:
UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ )
if self.is_whitespace(__magic_name__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__magic_name__ ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase_ : Optional[int] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase_ : Tuple = token[1:]
UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset
UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase_ : int = end
return token_mapping
@property
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
return len(self.vocab )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase_ : Optional[Any] = None
return state
def __setstate__( self : str , __magic_name__ : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]:
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCAmelCase_ : Dict = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ )
else:
UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : List[Any] = []
for pi, piece in enumerate(__magic_name__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0:
new_pieces.append(__magic_name__ )
continue
else:
continue
UpperCAmelCase_ : List[str] = 0
for i, chunk in enumerate(__magic_name__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__magic_name__ )
UpperCAmelCase_ : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : str = i
if len(__magic_name__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.reverse_vocab.get(__magic_name__ , self.unk_token )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1]
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__magic_name__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3)
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__magic_name__ ) == 1:
UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ )
if cat == "Zs":
return True
return False
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = {}
with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__magic_name__ ):
UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' )
UpperCAmelCase_ : Dict = int(__magic_name__ )
return token_to_idx
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 0
if os.path.isdir(__magic_name__ ):
UpperCAmelCase_ : Any = os.path.join(
__magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda __magic_name__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCAmelCase_ : Dict = token_index
writer.write(token + '''\n''' )
index += 1
UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' )
with open(__magic_name__ , '''wb''' ) as fi:
UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (vocab_file,)
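# Tiny standalone sketch of the character classes the tokenizer above
# special-cases when splitting SentencePiece pieces: CJK ideographs, a fixed
# punctuation set, and whitespace (including Unicode category "Zs"). Helper
# names are illustrative.
import unicodedata

def is_cjk_char(ch: str) -> bool:
    return "\u4e00" <= ch <= "\u9fff"

def is_whitespace_char(ch: str) -> bool:
    return ch in " \t\n\r" or (len(ch) == 1 and unicodedata.category(ch) == "Zs")

assert is_cjk_char("中") and not is_cjk_char("a")
assert is_whitespace_char("\u3000")  # ideographic space, category Zs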
| 644
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : str = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __a (lowerCamelCase ):
__a : Tuple = """open-llama"""
def __init__( self : Dict , __magic_name__ : Optional[Any]=10_00_00 , __magic_name__ : Tuple=40_96 , __magic_name__ : Union[str, Any]=1_10_08 , __magic_name__ : List[Any]=32 , __magic_name__ : Optional[Any]=32 , __magic_name__ : List[str]="silu" , __magic_name__ : Optional[int]=20_48 , __magic_name__ : str=0.0_2 , __magic_name__ : Any=1E-6 , __magic_name__ : Tuple=True , __magic_name__ : Any=0 , __magic_name__ : Dict=1 , __magic_name__ : Any=2 , __magic_name__ : Optional[Any]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[Any]=True , __magic_name__ : Any=True , __magic_name__ : int=None , **__magic_name__ : Dict , ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : List[str] = max_position_embeddings
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Dict = rms_norm_eps
UpperCAmelCase_ : List[Any] = use_cache
UpperCAmelCase_ : Dict = kwargs.pop(
'''use_memorry_efficient_attention''' , __magic_name__ )
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_dropout_prob
UpperCAmelCase_ : List[Any] = use_stable_embedding
UpperCAmelCase_ : List[str] = shared_input_output_embedding
UpperCAmelCase_ : List[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ , )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __magic_name__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"""got {self.rope_scaling}""" )
UpperCAmelCase_ : Dict = self.rope_scaling.get('''type''' , __magic_name__ )
UpperCAmelCase_ : Tuple = self.rope_scaling.get('''factor''' , __magic_name__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__magic_name__ , __magic_name__ ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
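# Hedged sketch of a `rope_scaling` payload that passes the validation above:
# a two-field dict whose "type" is "linear" or "dynamic" and whose "factor"
# is a float strictly greater than 1.0.
rope_scaling = {"type": "linear", "factor": 2.0}

assert isinstance(rope_scaling, dict) and len(rope_scaling) == 2
assert rope_scaling["type"] in ("linear", "dynamic")
assert isinstance(rope_scaling["factor"], float) and rope_scaling["factor"] > 1.0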
| 721
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> str:
if number > 0:
raise ValueError('''input must be a negative integer''' )
UpperCAmelCase_ : Union[str, Any] = len(bin(SCREAMING_SNAKE_CASE__ )[3:] )
UpperCAmelCase_ : Union[str, Any] = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase_ : Optional[Any] = (
(
'''1'''
+ '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE__ ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
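# Cross-check sketch for the two's-complement routine above, using Python's
# masked arithmetic as a reference. The bit width matches the routine:
# enough bits for |n| plus one sign bit. The function name is illustrative.
def twos_complement_reference(n: int) -> str:
    if n > 0:
        raise ValueError("input must be a negative integer")
    width = len(bin(abs(n))) - 2 + 1  # magnitude bits + one sign bit
    return bin(n & ((1 << width) - 1)) if n < 0 else "0b0"

assert twos_complement_reference(-5) == "0b1011"
assert twos_complement_reference(-1) == "0b11"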
| 644
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : Tuple = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __a (lowerCamelCase ):
__a : Tuple = "levit"
def __init__( self : List[str] , __magic_name__ : Optional[Any]=2_24 , __magic_name__ : Tuple=3 , __magic_name__ : List[str]=3 , __magic_name__ : Optional[int]=2 , __magic_name__ : Optional[Any]=1 , __magic_name__ : Optional[Any]=16 , __magic_name__ : str=[1_28, 2_56, 3_84] , __magic_name__ : Union[str, Any]=[4, 8, 12] , __magic_name__ : int=[4, 4, 4] , __magic_name__ : List[Any]=[16, 16, 16] , __magic_name__ : Dict=0 , __magic_name__ : Union[str, Any]=[2, 2, 2] , __magic_name__ : int=[2, 2, 2] , __magic_name__ : Any=0.0_2 , **__magic_name__ : Union[str, Any] , ) -> Dict:
"""simple docstring"""
super().__init__(**__magic_name__ )
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : str = num_channels
UpperCAmelCase_ : Tuple = kernel_size
UpperCAmelCase_ : int = stride
UpperCAmelCase_ : List[Any] = padding
UpperCAmelCase_ : int = hidden_sizes
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : List[Any] = key_dim
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : Optional[Any] = patch_size
UpperCAmelCase_ : Tuple = attention_ratio
UpperCAmelCase_ : Union[str, Any] = mlp_ratio
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Tuple = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __a (lowerCamelCase ):
__a : Optional[Any] = version.parse("1.11" )
@property
def UpperCAmelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCAmelCase__ ( self : Any ) -> float:
"""simple docstring"""
return 1E-4
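# Hedged restatement of the subsample schedule built at the end of the LeViT
# config above: between stages, a "Subsample" op is parameterized by key_dim,
# the ratio hidden_size // key_dim for that stage, and the fixed tail
# (4, 2, 2). Values below mirror the config defaults.
key_dim = [16, 16, 16]
hidden_sizes = [128, 256, 384]
down_ops = [
    ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
    ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
assert down_ops[0][2] == 8 and down_ops[1][2] == 16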
| 700
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
# For consistency across the different places where the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(__magic_name__ ) # fails here
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 )
UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 )
UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
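# Hedged usage sketch (an addition, not part of the original test file): a DisjunctiveConstraint
# is normally consumed by transformers' `generate`, which accepts a `constraints` list when beam
# search is enabled. The checkpoint name and the phrases below are illustrative assumptions.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("gpt2")  # assumed checkpoint, chosen for size
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    # Force the continuation to contain one of several surface forms of the same word.
    word_ids = [
        tokenizer(" rain", add_special_tokens=False).input_ids,
        tokenizer(" raining", add_special_tokens=False).input_ids,
    ]
    constraint = DisjunctiveConstraint(word_ids)
    inputs = tokenizer("The weather forecast says", return_tensors="pt")
    # Constrained decoding requires beam search (num_beams > 1).
    outputs = model.generate(**inputs, constraints=[constraint], num_beams=4, max_new_tokens=20)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))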
| 644
| 0
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = get_activation('''swish''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_activation('''mish''' )
self.assertIsInstance(__magic_name__ , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = get_activation('''gelu''' )
self.assertIsInstance(__magic_name__ , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
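# Hedged cross-check (added for illustration): SiLU/"swish" is defined as x * sigmoid(x), so
# the module returned by `get_activation` can be verified numerically against that closed form.
if __name__ == "__main__":
    x = torch.linspace(-5.0, 5.0, steps=11)
    act = get_activation("silu")
    reference = x * torch.sigmoid(x)  # closed-form SiLU
    assert torch.allclose(act(x), reference, atol=1e-6)
    print("silu matches x * sigmoid(x) on", x.numel(), "sample points")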
| 701
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None)
snake_case_ : Optional[Any] = df.shape[0]  # number of rows in the dataset
# If you're using some other dataset input the target column
snake_case_ : Any = df.iloc[:, 1:2]
snake_case_ : str = actual_data.values.reshape(len_data, 1)
snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data)
snake_case_ : List[str] = 10
snake_case_ : Any = 5
snake_case_ : Any = 20
snake_case_ : Tuple = len_data - periods * look_back
snake_case_ : str = actual_data[:division]
snake_case_ : Optional[int] = actual_data[division - look_back :]
snake_case_ ,snake_case_ : Any = [], []
snake_case_ ,snake_case_ : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
snake_case_ : Any = np.array(train_x)
snake_case_ : Optional[Any] = np.array(test_x)
snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y])
snake_case_ : List[Any] = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64)) # input shape is inferred from the previous LSTM layer, so none is passed here
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
snake_case_ : Dict = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
snake_case_ : Optional[Any] = model.predict(x_test)
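# Hedged follow-up sketch (added): the forecasts above live in the scaler's [0, 1] range, so
# mapping them back to price units needs the fitted scaler, which the script above discards.
# The names below follow the script's intent (`df`, `result`, keeping one scaler per column);
# treat this as an illustrative post-processing step, not the original author's code.
price_scaler = MinMaxScaler().fit(df.iloc[:, 1:2].values.reshape(-1, 1))
unscaled_forecast = price_scaler.inverse_transform(result.reshape(-1, 1)).reshape(result.shape)
print("first forecast window in price units:", unscaled_forecast[0])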
| 644
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case_ : Tuple = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
snake_case_ : Dict = "CompVis/stable-diffusion-v1-2"
snake_case_ : Any = "CompVis/stable-diffusion-v1-3"
snake_case_ : str = "CompVis/stable-diffusion-v1-4"
class __a (lowerCamelCase ):
def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str:
"""simple docstring"""
super().__init__()
UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ )
UpperCAmelCase_ : Tuple = StableDiffusionPipeline(
vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )}
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(__magic_name__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase_ : int = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
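# Hedged usage sketch (added): community pipelines like this one are usually loaded through
# `DiffusionPipeline.from_pretrained(..., custom_pipeline=...)`. The checkpoint and the
# `custom_pipeline` identifier below are assumptions for illustration, not taken from this file.
if __name__ == "__main__":
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="stable_diffusion_comparison",  # assumed community-pipeline name
    )
    images = pipe(prompt="an astronaut riding a horse", num_inference_steps=25).images
    print(f"got {len(images)} images, one per v1.x checkpoint")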
| 644
| 0
|
'''simple docstring'''
import operator as op
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Optional[Any] = lambda SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int(x / y ) # noqa: E731 integer division operation
UpperCAmelCase_ : Optional[Any] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ), '''Action'''.center(12 ), '''Stack''', sep=''' | ''' )
print('''-''' * (30 + len(SCREAMING_SNAKE_CASE__ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(SCREAMING_SNAKE_CASE__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ), ('''push(''' + x + ''')''').ljust(12 ), ''','''.join(SCREAMING_SNAKE_CASE__ ), sep=''' | ''' )
else:
UpperCAmelCase_ : int = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ), ('''pop(''' + b + ''')''').ljust(12 ), ''','''.join(SCREAMING_SNAKE_CASE__ ), sep=''' | ''' )
UpperCAmelCase_ : int = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ), ('''pop(''' + a + ''')''').ljust(12 ), ''','''.join(SCREAMING_SNAKE_CASE__ ), sep=''' | ''' )
stack.append(
str(opr[x](int(SCREAMING_SNAKE_CASE__ ), int(SCREAMING_SNAKE_CASE__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ), ('''push(''' + a + x + b + ''')''').ljust(12 ), ''','''.join(SCREAMING_SNAKE_CASE__ ), sep=''' | ''', )
return int(stack[0] )
if __name__ == "__main__":
snake_case_ : List[Any] = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
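# Hedged companion sketch (added): the evaluator above expects space-separated postfix input.
# A compact shunting-yard pass produces that input from space-separated infix; parentheses and
# the right-associative '^' are handled, the other operators are treated as left-associative.
def infix_to_postfix(infix: str) -> str:
    precedence = {"^": 3, "*": 2, "/": 2, "+": 1, "-": 1}
    output, operators = [], []
    for token in infix.split():
        if token.isdigit():
            output.append(token)  # operands go straight to the output
        elif token == "(":
            operators.append(token)
        elif token == ")":
            while operators[-1] != "(":
                output.append(operators.pop())
            operators.pop()  # discard the matching '('
        else:
            # pop operators that bind at least as tightly (strictly tighter for '^')
            while (
                operators
                and operators[-1] != "("
                and (precedence[operators[-1]] > precedence[token]
                     or (precedence[operators[-1]] == precedence[token] and token != "^"))
            ):
                output.append(operators.pop())
            operators.append(token)
    while operators:
        output.append(operators.pop())
    return " ".join(output)
# Example: infix_to_postfix("3 + 4 * ( 2 - 1 )") == "3 4 2 1 - * +"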
| 703
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
snake_case_ : Optional[int] = 16
snake_case_ : Tuple = 32
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict:
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : Tuple = datasets.map(
SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE__ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' )
return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCAmelCase_ : str = DataLoader(
tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any:
model.eval()
UpperCAmelCase_ : List[str] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE__ ) - 1:
UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : List[str] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
# Initialize accelerator
UpperCAmelCase_ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : int = config['''lr''']
UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] )
UpperCAmelCase_ : Optional[int] = int(config['''seed'''] )
UpperCAmelCase_ : List[str] = int(config['''batch_size'''] )
UpperCAmelCase_ : Optional[int] = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ )
# Instantiate optimizer
UpperCAmelCase_ : str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, )
else:
UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_ : Union[str, Any] = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' )
UpperCAmelCase_ : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCAmelCase_ : List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCAmelCase_ : int = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1
UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''', optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f:
UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCAmelCase_ : int = {}
for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = outputs.loss
UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCAmelCase_ : Tuple = F"""epoch_{epoch}"""
UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ )
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = accuracy
UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0]
UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr''']
UpperCAmelCase_ : Tuple = epoch
UpperCAmelCase_ : Dict = overall_step
accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( ) -> List[str]:
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, )
parser.add_argument(
'''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
parser.add_argument(
'''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', )
parser.add_argument(
'''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', )
parser.add_argument(
'''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', )
UpperCAmelCase_ : Optional[int] = parser.parse_args()
UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
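# Hedged usage sketch (added): the checkpoint folders above are named `epoch_{n}`, so a typical
# run/resume cycle with this script looks like the commands below. The accelerate config file
# name is an assumption; the script flags are the ones defined in the argument parser above.
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --num_epochs 2 --output_dir ./ckpts
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --num_epochs 2 --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0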
| 644
| 0
|
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> str:
if number > 0:
raise ValueError('''input must be a negative integer''' )
UpperCAmelCase_ : Union[str, Any] = len(bin(SCREAMING_SNAKE_CASE__ )[3:] )
UpperCAmelCase_ : Union[str, Any] = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase_ : Optional[Any] = (
(
'''1'''
+ '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE__ ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
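# Hedged cross-check (added): for a fixed bit width, Python can produce the same bit pattern
# by masking, e.g. the 8-bit two's complement of -5. The function above instead picks the
# minimal width, so the two agree once extra leading sign bits are accounted for.
if __name__ == "__main__":
    width = 8  # assumed fixed width for the demo
    value = -5
    print(format(value & ((1 << width) - 1), f"0{width}b"))  # -> 11111011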
| 704
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]:
UpperCAmelCase_ : int = []
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return [nums.copy()]
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : List[Any] = nums.pop(0 )
UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ )
for perm in permutations:
perm.append(SCREAMING_SNAKE_CASE__ )
result.extend(SCREAMING_SNAKE_CASE__ )
nums.append(SCREAMING_SNAKE_CASE__ )
return result
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if start == len(SCREAMING_SNAKE_CASE__ ) - 1:
output.append(nums[:] )
else:
for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start]
backtrack(start + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack
UpperCAmelCase_ : Optional[int] = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
snake_case_ : Tuple = permutea([1, 2, 3])
print(res)
doctest.testmod()
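# Hedged cross-check (added): the backtracking helper (the second definition above, which
# shadows the first after the renaming) should agree with the standard library up to ordering.
from itertools import permutations
assert sorted(lowerCamelCase_([1, 2, 3])) == sorted(list(p) for p in permutations([1, 2, 3]))
print("backtracking output matches itertools.permutations for n=3")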
| 644
| 0
|
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case_ : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __a (datasets.BuilderConfig ):
__a : Optional[datasets.Features] = None
__a : str = "utf-8"
__a : Optional[str] = None
__a : Optional[str] = None
__a : bool = True # deprecated
__a : Optional[int] = None # deprecated
__a : int = 10 << 20 # 10MB
__a : Optional[bool] = None
class __a (datasets.ArrowBasedBuilder ):
__a : Optional[int] = JsonConfig
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
UpperCAmelCase_ : Optional[int] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[str] ) -> Tuple:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase_ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__magic_name__ , (str, list, tuple) ):
UpperCAmelCase_ : Tuple = data_files
if isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ : Optional[int] = [files]
UpperCAmelCase_ : Any = [dl_manager.iter_files(__magic_name__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
UpperCAmelCase_ : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(__magic_name__ , __magic_name__ ):
UpperCAmelCase_ : List[Any] = [files]
UpperCAmelCase_ : Any = [dl_manager.iter_files(__magic_name__ ) for file in files]
splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={'''files''': files} ) )
return splits
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCAmelCase_ : int = self.config.features.arrow_schema.field(__magic_name__ ).type
UpperCAmelCase_ : Optional[int] = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ : Optional[Any] = table_cast(__magic_name__ , self.config.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Tuple ) -> List[Any]:
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ : List[str] = json.load(__magic_name__ )
# We keep only the field we are interested in
UpperCAmelCase_ : Any = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__magic_name__ , (list, tuple) ):
UpperCAmelCase_ : int = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ : List[Any] = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys}
else:
UpperCAmelCase_ : Optional[int] = dataset
UpperCAmelCase_ : int = pa.Table.from_pydict(__magic_name__ )
yield file_idx, self._cast_table(__magic_name__ )
# If the file has one json object per line
else:
with open(__magic_name__ , '''rb''' ) as f:
UpperCAmelCase_ : Optional[Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCAmelCase_ : List[Any] = max(self.config.chunksize // 32 , 16 << 10 )
UpperCAmelCase_ : List[Any] = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
UpperCAmelCase_ : List[Any] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__magic_name__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCAmelCase_ : List[Any] = batch.decode(self.config.encoding , errors=__magic_name__ ).encode('''utf-8''' )
try:
while True:
try:
UpperCAmelCase_ : Union[str, Any] = paj.read_json(
io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__magic_name__ , pa.ArrowInvalid )
and "straddling" not in str(__magic_name__ )
or block_size > len(__magic_name__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(__magic_name__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ : int = json.load(__magic_name__ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(__magic_name__ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON
try:
UpperCAmelCase_ : str = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ : Optional[int] = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys}
UpperCAmelCase_ : Union[str, Any] = pa.Table.from_pydict(__magic_name__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(__magic_name__ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__magic_name__ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(__magic_name__ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__magic_name__ )
batch_idx += 1
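# Hedged usage sketch (added): this builder backs `load_dataset("json", ...)`. The snippet
# below writes a tiny JSON Lines file and reads it back; the `field` argument of JsonConfig
# would instead select a nested list of records, matching the first branch of the reader above.
if __name__ == "__main__":
    import os
    import tempfile
    from datasets import load_dataset
    tmp = tempfile.mkdtemp()
    path = os.path.join(tmp, "toy.jsonl")
    with open(path, "w", encoding="utf-8") as f:
        f.write('{"text": "hello", "label": 0}\n{"text": "world", "label": 1}\n')
    ds = load_dataset("json", data_files={"train": path})["train"]
    print(ds.column_names, len(ds))  # ['text', 'label'] 2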
| 705
|
'''simple docstring'''
class __a :
def __init__( self : List[Any] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = size
UpperCAmelCase_ : Tuple = [0] * size
UpperCAmelCase_ : Optional[Any] = [0] * size
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = value
while index < self.size:
UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1
if current_left_border == index:
UpperCAmelCase_ : List[str] = value
else:
UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
right -= 1 # Because of right is exclusive
UpperCAmelCase_ : List[str] = 0
while left <= right:
UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ )
if left <= current_left:
UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] )
UpperCAmelCase_ : Optional[Any] = current_left
else:
UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
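# Hedged standalone sketch (added): the class above is a Fenwick-style structure for range
# maximum queries, but its method names were normalized away, so here is a compact prefix-max
# Fenwick tree with the usual point-update / prefix-query API for reference. Note that a
# max-Fenwick tree only supports updates that raise values, never lower them.
class PrefixMaxFenwick:
    def __init__(self, size: int) -> None:
        self.size = size
        self.tree = [0] * (size + 1)  # 1-based internal indexing

    def update(self, index: int, value: int) -> None:
        """Raise position `index` (0-based) to at least `value`."""
        i = index + 1
        while i <= self.size:
            self.tree[i] = max(self.tree[i], value)
            i += i & (-i)  # jump to the next node covering this position

    def prefix_max(self, index: int) -> int:
        """Maximum over positions 0..index (inclusive)."""
        i, best = index + 1, 0
        while i > 0:
            best = max(best, self.tree[i])
            i -= i & (-i)  # drop the lowest set bit
        return best

if __name__ == "__main__":
    fw = PrefixMaxFenwick(8)
    for pos, val in [(2, 5), (5, 9), (7, 3)]:
        fw.update(pos, val)
    assert fw.prefix_max(4) == 5 and fw.prefix_max(7) == 9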
| 644
| 0
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
snake_case_ : Optional[Any] = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
snake_case_ : Optional[int] = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : Any = SavedModel()
UpperCAmelCase_ : Any = []
with open(os.path.join(SCREAMING_SNAKE_CASE__, '''utils''', '''tf_ops''', '''onnx.json''' ) ) as f:
UpperCAmelCase_ : int = json.load(SCREAMING_SNAKE_CASE__ )['''opsets''']
for i in range(1, opset + 1 ):
onnx_ops.extend(onnx_opsets[str(SCREAMING_SNAKE_CASE__ )] )
with open(SCREAMING_SNAKE_CASE__, '''rb''' ) as f:
saved_model.ParseFromString(f.read() )
UpperCAmelCase_ : Dict = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
UpperCAmelCase_ : Any = sorted(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Dict = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(SCREAMING_SNAKE_CASE__ )
if strict and len(SCREAMING_SNAKE_CASE__ ) > 0:
raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(SCREAMING_SNAKE_CASE__ ) > 0:
print(F"""Found the following incompatible ops for the opset {opset}:""" )
print(*SCREAMING_SNAKE_CASE__, sep='''\n''' )
else:
print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
snake_case_ : List[str] = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
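# Hedged companion note (added): TensorFlow ships the `saved_model_cli` tool, which can dump
# the same graph/op-level information this script parses out of the protobuf, e.g.:
#
#   saved_model_cli show --dir path/to/saved_model --all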
| 706
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : List[str] = use_token_type_ids
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : Tuple = scope
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : str = None
if self.use_token_type_ids:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ )
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
# create attention mask
UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
UpperCAmelCase_ : Any = self.seq_length // 2
UpperCAmelCase_ : Tuple = 0
# first forward pass
UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCAmelCase_ : str = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , )
# get two different outputs
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
# select random slice
UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval()
UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
# first forward pass
UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[
'''last_hidden_state'''
]
# select random slice
UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ )
model.to(__magic_name__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = BioGptModel(__magic_name__ )
UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = config_and_inputs
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
__a : Union[str, Any] = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : List[str] = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = BioGptModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : str = type
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ )
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ )
@slow
    def test_batch_generation( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(torch_device )
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        tokenizer.padding_side = '''left'''
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        inputs = tokenizer(sentences , return_tensors='''pt''' , padding=True )
        input_ids = inputs['''input_ids'''].to(torch_device )
        outputs = model.generate(
            input_ids=input_ids , attention_mask=inputs['''attention_mask'''].to(torch_device ) , )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(torch_device )
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(torch_device )
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
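    # The padded batch above must reproduce per-sentence generation: the shorter
    # prompt's max_length is reduced by its pad count, so both code paths emit
    # the same number of new tokens and hence identical decoded sentences.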
@slow
    def test_model_from_pretrained( self : str ) -> Optional[Any]:
        """simple docstring"""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_biogpt_sequence_classification_model( self : Tuple ) -> str:
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_biogpt_sequence_classification_model_for_multi_label( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __a (unittest.TestCase ):
@slow
    def test_inference_biogpt( self : List[Any] ) -> str:
        """simple docstring"""
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        input_ids = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
        output = model(input_ids )[0]
        vocab_size = 4_23_84
        expected_shape = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_biogpt_generation( self : Any ) -> List[Any]:
        """simple docstring"""
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(torch_device )
        torch.manual_seed(0 )
        tokenized = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(torch_device )
        output_ids = model.generate(
            **tokenized , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=True , )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        expected_output_str = (
            '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
            ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
            ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
            ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
            ''' more than 800,000 deaths.'''
        )
        self.assertEqual(output_str , expected_output_str )
| 644
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
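# At import time only the structure dict above is registered; the heavy
# submodules are loaded lazily on first attribute access via _LazyModule.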
| 707
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp( self : str ) -> str:
        """simple docstring"""
        super().setUp()
        vocab = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self : List[Any] , **kwargs : Dict ) -> Tuple:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self : Optional[int] , tokenizer : List[str] ) -> List[str]:
        """simple docstring"""
        input_text = '''adapt act apte'''
        output_text = '''adapt act apte'''
        return input_text, output_text
    def test_full_tokenizer( self : str ) -> Any:
        """simple docstring"""
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt act apte'''
        bpe_tokens = ['''adapt''', '''act''', '''ap@@''', '''te''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
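    # In the toy vocab, "adapt act apte" tokenizes to ids [1, 2, 3, 4]; wrapped
    # in bos/eos it becomes exactly [0, 1, 2, 3, 4, 5], covering every vocab
    # entry except __unk__.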
    def test_special_tokens_small_tok( self : int ) -> List[str]:
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        assert tok('''sam''' ).input_ids == [13_84]
        src_text = '''I am a small frog.'''
        encoded = tok([src_text] , padding=False , truncation=True )['''input_ids''']
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
    def test_empty_word_small_tok( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        src_text = '''I am a small frog .'''
        src_text_dot = '''.'''
        encoded = tok(src_text )['''input_ids''']
        encoded_dot = tok(src_text_dot )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 644
| 0
|
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    def __init__( self : Any , group : int = 14 ) -> None:
        """simple docstring"""
        if group not in primes:
            raise ValueError('''Unsupported Group''' )
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key( self : List[Any] ) -> str:
        """simple docstring"""
        return hex(self.__private_key )[2:]
    def generate_public_key( self : Any ) -> str:
        """simple docstring"""
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key( self : List[Any] , key : int ) -> bool:
        """simple docstring"""
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key( self : Any , other_key_str : str ) -> str:
        """simple docstring"""
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static( remote_public_key_str : int , prime : int ) -> bool:
        """simple docstring"""
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static( local_private_key_str : str , remote_public_key_str : str , group : int = 14 ) -> str:
        """simple docstring"""
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
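# Hedged usage sketch (not part of the original file): two parties derive the
# same shared secret from each other's public keys.
#
#   alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#   assert alice.generate_shared_key(bob.generate_public_key()) == \
#       bob.generate_shared_key(alice.generate_public_key())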
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __a (unittest.TestCase ):
    def test_swish( self : Dict ) -> Dict:
        """simple docstring"""
        act = get_activation('''swish''' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_silu( self : Tuple ) -> Tuple:
        """simple docstring"""
        act = get_activation('''silu''' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_mish( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        act = get_activation('''mish''' )
        self.assertIsInstance(act , nn.Mish )
        self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_gelu( self : str ) -> Optional[Any]:
        """simple docstring"""
        act = get_activation('''gelu''' )
        self.assertIsInstance(act , nn.GELU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
| 644
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __a (unittest.TestCase ):
@slow
    def test_xlm_roberta_base( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 7_68) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
@slow
    def test_xlm_roberta_large( self : str ) -> List[str]:
        """simple docstring"""
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 10_24) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
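# The expected tensors above come from the commented-out fairseq snippets; only
# the last hidden dimension was stored, hence the output[:, :, -1] slice in the
# allclose checks.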
| 709
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __a (BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self : List[Any] , do_resize : bool = True , size_divisor : int = 32 , resample=PILImageResampling.BILINEAR , do_rescale : bool = True , **kwargs : List[str] , ) -> None:
        """simple docstring"""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )
    def resize( self : Optional[Any] , image : np.ndarray , size_divisor : int , resample : str , data_format : Optional[ChannelDimension] = None , **kwargs : Tuple ) -> np.ndarray:
        """simple docstring"""
        height, width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image
    def rescale( self : int , image : np.ndarray , scale : float , data_format : Optional[ChannelDimension] = None , **kwargs : Optional[Any] ) -> np.ndarray:
        """simple docstring"""
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
    def preprocess( self : str , images : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , do_resize : Optional[bool] = None , size_divisor : Optional[int] = None , resample=None , do_rescale : Optional[bool] = None , return_tensors : Optional[Union[TensorType, str]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : Tuple , ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 2_55 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
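# Hedged usage sketch (processor class as defined above; the image variable is
# a placeholder):
#
#   processor = __a(size_divisor=32)
#   batch = processor.preprocess([pil_image], return_tensors="np")
#   # pixel_values come out channels-first, with H and W floored to multiples of 32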
| 644
| 0
|
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key( key : str ) -> str:
    regex = R'''\w+[.]\d+'''
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , '''_'''.join(pat.split('''.''' ) ) )
    return key
def rename_key_and_reshape_tensor( pt_tuple_key : tuple , pt_tensor , random_flax_state_dict : dict ) -> tuple:
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model , init_key=42 ):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('''.''' ) )
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
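# Hedged usage sketch (pt_model/flax_model are placeholders, not from the
# original file):
#
#   pt_state_dict = pt_model.state_dict()
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)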
| 710
|
'''simple docstring'''
def solution( max_base : int = 10 , max_power : int = 22 ) -> int:
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
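# Example: 8**9 = 134217728 has exactly 9 digits, so it is one of the
# base**power coincidences counted by solution() (Project Euler problem 63).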
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 644
| 0
|
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel ):
    def __init__( self : Union[str, Any] , config , proj_size : int = 7_68 ) -> Tuple:
        """simple docstring"""
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
    def forward( self : Optional[int] , pixel_values : torch.Tensor , return_uncond_vector : bool = False ) -> List[str]:
        """simple docstring"""
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module ):
    def __init__( self : Union[str, Any] , config : Any ) -> Tuple:
        """simple docstring"""
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn='''gelu''' , attention_bias=True )
                for _ in range(num_layers )
            ] )
    def forward( self : Optional[int] , hidden_states : List[str] ) -> List[str]:
        """simple docstring"""
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
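# With a 24-layer CLIP vision backbone, the mapper above stacks
# (24 + 1) // 5 = 5 single-head transformer blocks over the pooled CLIP
# image embedding before it is projected to the conditioning size.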
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool ):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__( self : Tuple , *args : Any , **kwargs : Any ) -> Tuple:
        """simple docstring"""
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )
    def encode( self : Union[str, Any] , image : "Image" , question : str ) -> Tuple:
        """simple docstring"""
        return self.pre_processor(image , question , return_tensors='''pt''' )
    def forward( self : Any , inputs : List[str] ) -> Optional[Any]:
        """simple docstring"""
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode( self : int , outputs : int ) -> Optional[int]:
        """simple docstring"""
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
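# decode() maps the argmax over the VQA logits back to a human-readable answer
# string through the model config's id2label table, so the tool returns e.g.
# "cat" rather than a class index.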
| 644
| 0
|
'''simple docstring'''
def decimal_to_binary( num : int ) -> str:
    if isinstance(num , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if isinstance(num , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__( self : Optional[Any] , value : int | None = None ) -> Tuple:
        """simple docstring"""
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    def __init__( self : int , root : Node | None = None ) -> Optional[int]:
        """simple docstring"""
        self.root = root
def __str__( self : Any ) -> str:
"""simple docstring"""
return str(self.root )
    def __reassign_nodes( self : Any , node : Node , new_children : Node | None ) -> None:
        """simple docstring"""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self : List[str] , node : Node ) -> bool:
        """simple docstring"""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self : Union[str, Any] ) -> bool:
        """simple docstring"""
        return self.root is None
    def __insert( self : Any , value : str ) -> None:
        """simple docstring"""
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self : Optional[Any] , *values : List[str] ) -> None:
        """simple docstring"""
        for value in values:
            self.__insert(value )
    def search( self : Dict , value : int ) -> Node | None:
        """simple docstring"""
        if self.empty():
            raise IndexError('''Warning: Tree is empty! please use another.''' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self : Optional[int] , node : Node | None = None ) -> Node | None:
        """simple docstring"""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self : Dict , node : Node | None = None ) -> Node | None:
        """simple docstring"""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self : Tuple , value : int ) -> None:
        """simple docstring"""
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self : List[Any] , node : Node | None ) -> Iterable:
        """simple docstring"""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self : List[Any] , traversal_function : List[Any]=None ) -> Any:
        """simple docstring"""
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self : Optional[int] , arr : list , node : Node | None ) -> None:
        """simple docstring"""
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest( self : Tuple , k : int , node : Node ) -> int:
        """simple docstring"""
        arr: list[int] = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder( curr_node : Node | None ) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
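# postorder() emits the left subtree, then the right subtree, then the node
# itself, so the root of any non-empty tree is always the last list element.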
def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print('''The value 6 exists''' )
    else:
        print('''The value 6 doesn\'t exist''' )
    if t.search(-1 ) is not None:
        print('''The value -1 exists''' )
    else:
        print('''The value -1 doesn\'t exist''' )
    if not t.empty():
        print('''Max Value: ''' , t.get_max().value )  # type: ignore
        print('''Min Value: ''' , t.get_min().value )  # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 644
| 0
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__( self : str , prompt : str = None , choices : list = [] ) -> Dict:
        """simple docstring"""
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '''*'''
        else:
            self.arrow_char = '''➔ '''
    def write_choice( self : Tuple , index : Any , end : str = "" ) -> Tuple:
        """simple docstring"""
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , end )
        else:
            forceWrite(self.choices[index] , end )
    def print_choice( self : Dict , index : int ) -> str:
        """simple docstring"""
        if index == self.position:
            forceWrite(F""" {self.arrow_char} """ )
            self.write_choice(index )
        else:
            forceWrite(F"""    {self.choices[index]}""" )
        reset_cursor()
    def move_direction( self : str , direction : Direction , num_spaces : int = 1 ) -> Tuple:
        """simple docstring"""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
    @input.mark(KEYMAP['''up'''] )
    def move_up( self : Tuple ) -> Tuple:
        """simple docstring"""
        self.move_direction(Direction.UP )
    @input.mark(KEYMAP['''down'''] )
    def move_down( self : str ) -> Tuple:
        """simple docstring"""
        self.move_direction(Direction.DOWN )
    @input.mark(KEYMAP['''newline'''] )
    def select( self : Dict ) -> Optional[Any]:
        """simple docstring"""
        move_cursor(len(self.choices ) - self.position , '''DOWN''' )
        return self.position
    @input.mark(KEYMAP['''interrupt'''] )
    def interrupt( self : Optional[int] ) -> Optional[int]:
        """simple docstring"""
        move_cursor(len(self.choices ) - self.position , '''DOWN''' )
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row( self : str ) -> List[Any]:
        """simple docstring"""
        index = int(chr(self.current_selection ) )
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
            else:
                return
        else:
            return
    def run( self : Any , default_choice : int = 0 ) -> Union[str, Any]:
        """simple docstring"""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , '''\n''' )
            if in_colab:
                forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
            else:
                forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite('''\n''' )
        move_cursor(len(self.choices ) - self.position , '''UP''' )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , '''UP''' )
                        clear_line()
                    self.write_choice(choice , '''\n''' )
                    return choice
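# Hedged usage sketch (not part of the original file): `run` returns the index
# of the selected entry.
#
#   selected = BulletMenu("Mixed precision?", ["no", "fp16", "bf16"]).run(default_choice=0)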
| 713
|
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertex1: tuple[float, float], vertex2: tuple[float, float], vertex3: tuple[float, float], depth: int, ) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
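# Each call draws the current triangle, then recurses into the three corner
# sub-triangles with depth - 1, producing the Sierpinski gasket at depth 0.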
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 644
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig ):
    model_type = "wavlm"
    def __init__( self : Dict , vocab_size : Any=32 , hidden_size : Dict=7_68 , num_hidden_layers : int=12 , num_attention_heads : List[str]=12 , intermediate_size : Optional[int]=30_72 , hidden_act : List[str]="gelu" , hidden_dropout : Dict=0.1 , activation_dropout : Any=0.1 , attention_dropout : str=0.1 , feat_proj_dropout : Any=0.0 , final_dropout : List[str]=0.1 , layerdrop : Optional[Any]=0.1 , initializer_range : Tuple=0.0_2 , layer_norm_eps : int=1E-5 , feat_extract_norm : str="group" , feat_extract_activation : Tuple="gelu" , conv_dim : int=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride : Any=(5, 2, 2, 2, 2, 2, 2) , conv_kernel : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , conv_bias : Optional[int]=False , num_conv_pos_embeddings : Dict=1_28 , num_conv_pos_embedding_groups : int=16 , num_buckets : Tuple=3_20 , max_bucket_distance : Any=8_00 , do_stable_layer_norm : Optional[int]=False , apply_spec_augment : Any=True , mask_time_prob : Tuple=0.0_5 , mask_time_length : Tuple=10 , mask_time_min_masks : Union[str, Any]=2 , mask_feature_prob : List[str]=0.0 , mask_feature_length : Tuple=10 , num_codevectors_per_group : Optional[Any]=3_20 , num_codevector_groups : Tuple=2 , contrastive_logits_temperature : Dict=0.1 , num_negatives : Any=1_00 , codevector_dim : Dict=2_56 , proj_codevector_dim : Any=2_56 , diversity_loss_weight : Tuple=0.1 , ctc_loss_reduction : Dict="mean" , ctc_zero_infinity : Dict=False , use_weighted_layer_sum : List[str]=False , classifier_proj_size : int=2_56 , tdnn_dim : Optional[int]=(5_12, 5_12, 5_12, 5_12, 15_00) , tdnn_kernel : Union[str, Any]=(5, 3, 3, 1, 1) , tdnn_dilation : str=(1, 2, 3, 1, 1) , xvector_output_dim : Tuple=5_12 , num_ctc_classes : Any=80 , pad_token_id : Dict=0 , bos_token_id : List[Any]=1 , eos_token_id : int=2 , add_adapter : List[str]=False , adapter_kernel_size : Dict=3 , adapter_stride : List[Any]=2 , num_adapter_layers : str=3 , output_hidden_size : Any=None , **kwargs : Any , ) -> List[Any]:
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
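# Worked example for the property above, using the default conv_stride
# (5, 2, 2, 2, 2, 2, 2) from the signature: the feature encoder downsamples the
# raw waveform by the product of its strides,
#   functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)  # -> 320
# i.e. roughly one hidden frame per 320 audio samples.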
| 714
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
snake_case_ : List[str] = False
class __a (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__magic_name__ )
UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Any = generator.manual_seed(0 )
UpperCAmelCase_ : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077'''
UpperCAmelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe.dual_guided(
prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger '''
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = pipe.text_to_image(
prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 644
| 0
|
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    >>> reverse_long_words("nohtyP is nohtyP")
    'Python is Python'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 715
|
'''simple docstring'''
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
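# Usage sketch (assumed; mirrors how such pinned-dependency tables are typically
# consumed in a setup.py): index the mapping by package name so every install
# target shares one source of truth for version ranges, e.g.
#   install_requires = [deps["torch"], deps["transformers"]]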
| 644
| 0
|
'''simple docstring'''
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __a (PipelineTool ):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
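# Minimal usage sketch (assumed, not part of the original file): PipelineTool
# subclasses chain encode -> forward -> decode when called directly, e.g.
#   tts = __a()
#   waveform = tts("Hello world")  # waveform tensor from the SpeechT5 HiFi-GAN vocoder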
| 716
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a (unittest.TestCase ):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet
UpperCAmelCase_ : Dict = KarrasVeScheduler()
UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0]
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ )
UpperCAmelCase_ : List[Any] = KarrasVeScheduler()
UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 644
| 0
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
snake_case_ : Any = logging.get_logger(__name__)
class __a :
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamic import to avoid a circular dependency at module load time
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(
        self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None,
        max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None,
        truncation: bool = True, **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors,
            max_length=max_length, padding=padding, truncation=truncation, **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors,
            padding=padding, max_length=max_target_length, truncation=truncation, **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
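# Layout note (derived from save_pretrained/from_pretrained above): a saved RAG
# tokenizer directory holds two subfolders, question_encoder_tokenizer/ and
# generator_tokenizer/, and from_pretrained rebuilds the tokenizer pair from
# those subfolders with AutoTokenizer.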
| 717
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __a (PipelineTool ):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]
    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
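# Minimal usage sketch (assumed): the tool maps a raw waveform to text,
#   transcriber = __a()
#   text = transcriber(audio)  # audio: 1-D float array, 16 kHz expected by Whisper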
| 644
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase ):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        size=None, do_normalize=True, do_convert_rgb=True, patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_convert_rgb''' ) )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ : Optional[Any] = 20_48
UpperCAmelCase_ : int = image_processor(__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1E-3 , rtol=1E-3 ) )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
UpperCAmelCase_ : int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : List[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processor(
__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
UpperCAmelCase_ : int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase_ : Optional[Any] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__magic_name__ ):
UpperCAmelCase_ : int = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
UpperCAmelCase_ : Tuple = '''Hello'''
UpperCAmelCase_ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ , header_text=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Tuple = image_processor(
__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ , header_text=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
UpperCAmelCase_ : List[str] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : int = image_processor(
__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : str = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Optional[int] = image_processor(
__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self , num_channels=4 )
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_convert_rgb''' ) )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
UpperCAmelCase_ : Union[str, Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ : List[str] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ : Tuple = image_processor(
__magic_name__ , return_tensors='''pt''' , max_patches=__magic_name__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 718
|
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the Greatest Common Divisor (GCD) recursively.

    >>> greatest_common_divisor(24, 40)
    8
    >>> greatest_common_divisor(3, 7)
    1
    """
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the GCD iteratively with Euclid's algorithm.

    >>> gcd_by_iterative(24, 40)
    8
    """
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
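# Worked example of the recursion above: greatest_common_divisor(121, 44)
#   -> greatest_common_divisor(44 % 121, 121) = greatest_common_divisor(44, 121)
#   -> greatest_common_divisor(121 % 44, 44)  = greatest_common_divisor(33, 44)
#   -> greatest_common_divisor(44 % 33, 33)   = greatest_common_divisor(11, 33)
#   -> greatest_common_divisor(33 % 11, 11)   = greatest_common_divisor(0, 11) -> 11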
| 644
| 0
|
'''simple docstring'''
def snake_case_to_camel_pascal_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transform a snake_case string to camelCase (or PascalCase if ``use_pascal`` is True).

    >>> snake_case_to_camel_pascal_case("some_random_string")
    'someRandomString'
    >>> snake_case_to_camel_pascal_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 719
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2,
        num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __a (ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class __a (unittest.TestCase ):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 644
| 0
|
import os
def solution() -> int:
    """
    Project Euler 22: sum the "name scores" of the alphabetically sorted names in
    p022_names.txt, where a name's score is its alphabetical value multiplied by
    its (1-based) position in the sorted list.
    """
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 720
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __a (PreTrainedTokenizer ):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8",
        unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any:
"""simple docstring"""
if text is None:
return None
UpperCAmelCase_ : str = self.tokenize(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', []
for i, ch in enumerate(__magic_name__ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ )
else:
UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ )
if self.is_whitespace(__magic_name__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__magic_name__ ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase_ : Optional[int] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase_ : Tuple = token[1:]
UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset
UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase_ : int = end
return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]:
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCAmelCase_ : Dict = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ )
else:
UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : List[Any] = []
for pi, piece in enumerate(__magic_name__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0:
new_pieces.append(__magic_name__ )
continue
else:
continue
UpperCAmelCase_ : List[str] = 0
for i, chunk in enumerate(__magic_name__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__magic_name__ )
UpperCAmelCase_ : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : str = i
if len(__magic_name__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
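    # Illustrative example (computed from the two formulas above): for
    # token_ids_0 = [10, 11] and token_ids_1 = [20], the pair layout
    # [CLS] A [SEP] [SEP] B [SEP] yields token type ids [0, 0, 0, 1, 1, 1, 1],
    # i.e. len(A)+1 zeros followed by len(B)+3 ones.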
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
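    # Expected vocab.txt format (inferred from load_vocab above): one token per
    # line, the token's id being its 0-based line number, e.g. a file containing
    # "[PAD]\n[CLS]\n[SEP]\n" maps to {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2}.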
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 644
| 0
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
snake_case_ : str = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class __a (unittest.TestCase ):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE + "\n")
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE)
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", re.sub("Bert", "TestModel", REFERENCE_CODE))
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", f"{long_class_name}LMPredictionHead", re.sub("Bert", long_class_name, REFERENCE_CODE))
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", REFERENCE_CODE, overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE))
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
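# Minimal sketch (illustrative, not part of the original test suite) of the black
# normalization step that `check_copy_consistency` relies on: both the reference and
# the candidate copy are run through `black.format_str`, so purely cosmetic
# differences in spacing or wrapping never count as a divergence.
if __name__ == "__main__":
    demo_mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
    print(black.format_str("def f(x,y):\n  return x+y\n", mode=demo_mode))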
"""Two's complement representation of a negative integer."""
def twos_complement(number: int) -> str:
    """
    Take in a negative integer `number` and return its two's complement
    representation as a binary string.

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    >>> twos_complement(-17)
    '0b101111'
    >>> twos_complement(-207)
    '0b100110001'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
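    # Cross-check (illustrative): for x < 0 the pattern built above equals
    # bin(2**n - abs(x)), where n is one bit wider than abs(x)'s binary form.
    for sample in (-1, -5, -17, -207):
        bits = len(bin(sample)[3:]) + 1
        assert twos_complement(sample) == bin(2**bits - abs(sample))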